repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ricket1978/db.py | db/db.py | 1 | 63918 | import threading
import glob
import gzip
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import StringIO # Python 3.3+
import uuid
import json
import base64
import re
import os
import sys
import pandas as pd
from prettytable import PrettyTable
import pybars
from .queries import mysql as mysql_templates
from .queries import postgres as postgres_templates
from .queries import sqlite as sqlite_templates
from .queries import mssql as mssql_templates
queries_templates = {
"mysql": mysql_templates,
"postgres": postgres_templates,
"redshift": postgres_templates,
"sqlite": sqlite_templates,
"mssql": mssql_templates,
}
# attempt to import the relevant database libraries
# TODO: maybe add warnings?
try:
import psycopg2 as pg
HAS_PG = True
except ImportError:
HAS_PG = False
try:
import MySQLdb
mysql_connect = MySQLdb.connect
HAS_MYSQL = True
except ImportError:
try:
import pymysql
mysql_connect = pymysql.connect
HAS_MYSQL = True
except ImportError:
HAS_MYSQL = False
try:
import sqlite3 as sqlite
HAS_SQLITE = True
except ImportError:
HAS_SQLITE = False
try:
import pyodbc as pyo
HAS_ODBC = True
except ImportError:
try:
import pypyodbc as pyo
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
try:
import pymssql
HAS_PYMSSQL = True
except ImportError:
HAS_PYMSSQL = False
class Column(object):
"""
A Column is an in-memory reference to a column in a particular table. You
can use it to do some basic DB exploration and you can also use it to
execute simple queries.
"""
def __init__(self, con, query_templates, table, name, dtype, keys_per_column):
self._con = con
self._query_templates = query_templates
self.table = table
self.name = name
self.type = dtype
self.keys_per_column = keys_per_column
self.foreign_keys = []
self.ref_keys = []
def __repr__(self):
tbl = PrettyTable(["Table", "Name", "Type", "Foreign Keys",
"Reference Keys"])
tbl.add_row([self.table, self.name, self.type, self._str_foreign_keys(),
self._str_ref_keys()])
return str(tbl)
def __str__(self):
return "Column({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
tbl = PrettyTable(["Table", "Name", "Type"])
tbl.add_row([self.table, self.name, self.type])
return tbl.get_html_string()
def _str_foreign_keys(self):
keys = []
for col in self.foreign_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys)-self.keys_per_column)]
return ", ".join(keys)
def _str_ref_keys(self):
keys = []
for col in self.ref_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys)-self.keys_per_column)]
return ", ".join(keys)
def head(self, n=6):
"""
Returns first n values of your column as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.City.head()
0 Sao Jose dos Campos
1 Stuttgart
2 Montreal
3 Oslo
4 Prague
5 Prague
Name: City, dtype: object
>>> db.tables.Customer.City.head(2)
0 Sao Jose dos Campos
1 Stuttgart
Name: City, dtype: object
"""
q = self._query_templates['column']['head'].format(column=self.name, table=self.table, n=n)
return pd.io.sql.read_sql(q, self._con)[self.name]
def all(self):
"""
Returns entire column as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.Email.all().head()
0 [email protected]
1 [email protected]
2 [email protected]
3 [email protected]
4 [email protected]
Name: Email, dtype: object
>>> df = db.tables.Customer.Email.all()
>>> len(df)
59
"""
q = self._query_templates['column']['all'].format(column=self.name, table=self.table)
return pd.io.sql.read_sql(q, self._con)[self.name]
def unique(self):
"""
Returns all unique values as a DataFrame. This is executing:
SELECT
DISTINCT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.FirstName.unique().head(10)
0 Luis
1 Leonie
2 Francois
3 Bjorn
4 Franti\u0161ek
5 Helena
6 Astrid
7 Daan
8 Kara
9 Eduardo
Name: FirstName, dtype: object
>>> len(db.tables.Customer.LastName.unique())
59
"""
q = self._query_templates['column']['unique'].format(column=self.name, table=self.table)
return pd.io.sql.read_sql(q, self._con)[self.name]
def sample(self, n=10):
"""
Returns random sample of n rows as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples (removed from doctest as we can't predict random names...)
--------
from db import DemoDB
db = DemoDB()
db.tables.Artist.Name.sample(10)
0 Pedro Luis & A Parede
1 Santana Feat. Eric Clapton
2 Os Mutantes
3 Banda Black Rio
4 Adrian Leaper & Doreen de Feis
5 Chicago Symphony Orchestra & Fritz Reiner
6 Smashing Pumpkins
7 Spyro Gyra
8 Aaron Copland & London Symphony Orchestra
9 Sir Georg Solti & Wiener Philharmoniker
Name: Name, dtype: object
>>> from db import DemoDB
>>> db = DemoDB()
>>> df = db.tables.Artist.Name.sample(10)
>>> len(df)
10
"""
q = self._query_templates['column']['sample'].format(column=self.name, table=self.table, n=n)
return pd.io.sql.read_sql(q, self._con)[self.name]
class Table(object):
"""
A Table is an in-memory reference to a table in a database. You can use it to get more info
about the columns, schema, etc. of a table and you can also use it to execute queries.
"""
def __init__(self, con, query_templates, name, cols, keys_per_column):
self.name = name
self._con = con
self._cur = con.cursor()
self._query_templates = query_templates
self.foreign_keys = []
self.ref_keys = []
self.keys_per_column = keys_per_column
self._columns = cols
for col in cols:
attr = col.name
if attr in ("name", "con", "count"):
attr = self.name + "_" + col.name
setattr(self, attr, col)
self._cur.execute(self._query_templates['system']['foreign_keys_for_table'].format(table=self.name))
for (column_name, foreign_table, foreign_column) in self._cur:
col = getattr(self, column_name)
foreign_key = Column(con, self._query_templates, foreign_table, foreign_column, col.type, self.keys_per_column)
self.foreign_keys.append(foreign_key)
col.foreign_keys.append(foreign_key)
setattr(self, column_name, col)
self.foreign_keys = ColumnSet(self.foreign_keys)
self._cur.execute(self._query_templates['system']['ref_keys_for_table'].format(table=self.name))
for (column_name, ref_table, ref_column) in self._cur:
col = getattr(self, column_name)
ref_key = Column(con, self._query_templates, ref_table, ref_column, col.type, self.keys_per_column)
self.ref_keys.append(ref_key)
col.ref_keys.append(ref_key)
setattr(self, column_name, col)
self.ref_keys = ColumnSet(self.ref_keys)
def _tablify(self):
tbl = PrettyTable(["Column", "Type", "Foreign Keys", "Reference Keys"])
tbl.align["Column"] = "l"
tbl.align["Type"] = "l"
tbl.align["Foreign Keys"] = "l"
tbl.align["Reference Keys"] = "l"
for col in self._columns:
tbl.add_row([col.name, col.type, col._str_foreign_keys(), col._str_ref_keys()])
return tbl
def __repr__(self):
tbl = str(self._tablify())
r = tbl.split('\n')[0]
brk = "+" + "-"*(len(r)-2) + "+"
title = "|" + self.name.center(len(r)-2) + "|"
return brk + "\n" + title + "\n" + tbl
def __str__(self):
return "Table({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
return self._tablify().get_html_string()
def select(self, *args):
"""
Returns DataFrame of table with arguments selected as columns. This is
executing:
SELECT
<name of column 1>
, <name of column 2>
, <name of column 3>
FROM
<name_of_the_table>
Parameters
----------
*args: str
columns to select
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.select("Name")[:1].Name
0 For Those About To Rock (We Salute You)
Name: Name, dtype: object
# select name from the Track table
db.tables.Track.select("Name")
Name
0 For Those About To Rock (We Salute You)
1 Balls to the Wall
2 Fast As a Shark
3 Restless and Wild
4 Princess of the Dawn
5 Put The Finger On You
6 Let's Get It Up
7 Inject The Venom
8 Snowballed
9 Evil Walks
...
# select name & composer from the Track table
>>> df = db.tables.Track.select("Name", "Composer")
"""
q = self._query_templates['table']['select'].format(columns=", ".join(args), table=self.name)
return pd.io.sql.read_sql(q, self._con)
def head(self, n=6):
"""
Returns first n values of your table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.count
3503
-= Not in doctest as output is hard to predict
# select name from the Track table
db.tables.Track.head()
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
GenreId Composer Milliseconds \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719
1 1 None 342562
2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619
3 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 Angus Young, Malcolm Young, Brian Johnson 205662
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
db.tables.Track.head(1)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
GenreId Composer Milliseconds Bytes \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334
UnitPrice
0 0.99
"""
q = self._query_templates['table']['head'].format(table=self.name, n=n)
return pd.io.sql.read_sql(q, self._con)
def all(self):
"""
Returns entire table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.tables.Track.all())
3503
>>> df = db.tables.Track.all()
"""
q = self._query_templates['table']['all'].format(table=self.name)
return pd.io.sql.read_sql(q, self._con)
def unique(self, *args):
"""
Returns all unique values as a DataFrame. This is executing:
SELECT
DISTINCT
<name_of_the_column_1>
, <name_of_the_column_2>
, <name_of_the_column_3>
...
FROM
<name_of_the_table>
Parameters
----------
*args: columns as strings
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.unique("GenreId")
GenreId
0 1
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
9 10
10 11
11 12
12 13
13 14
14 15
15 16
16 17
17 18
18 19
19 20
20 21
21 22
22 23
23 24
24 25
>>> len(db.tables.Track.unique("GenreId", "MediaTypeId"))
38
"""
if len(args)==0:
columns = "*"
else:
columns = ", ".join(args)
q = self._query_templates['table']['unique'].format(columns=columns, table=self.name)
return pd.io.sql.read_sql(q, self._con)
def sample(self, n=10):
"""
Returns random sample of n rows as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples
--------
from db import DemoDB
db = DemoDB()
Not in doctest : can't predict sample
db.tables.Track.sample(10)
TrackId Name AlbumId \
0 274 Samba Makossa 25
1 1971 Girls, Girls, Girls 162
2 843 Otay 68
3 3498 Concerto for Violin, Strings and Continuo in G... 342
4 3004 Pride (In The Name Of Love) 238
5 2938 Beautiful Day 233
6 2023 O Braco Da Minha Guitarra 165
7 1920 Caxanga 158
8 3037 The Wanderer 240
9 1487 Third Stone From The Sun 120
MediaTypeId GenreId Composer \
0 1 7 None
1 1 3 Mick Mars/Nikki Sixx/Tommy Lee
2 1 2 John Scofield, Robert Aries, Milton Chambers a...
3 4 24 Pietro Antonio Locatelli
4 1 1 U2
5 1 1 Adam Clayton, Bono, Larry Mullen, The Edge
6 1 1 None
7 1 7 Milton Nascimento, Fernando Brant
8 1 1 U2; Bono
9 1 1 Jimi Hendrix
Milliseconds Bytes UnitPrice
0 271856 9095410 0.99
1 270288 8874814 0.99
2 423653 14176083 0.99
3 493573 16454937 0.99
4 230243 7549085 0.99
5 248163 8056723 0.99
6 258351 8469531 0.99
7 245551 8144179 0.99
8 283951 9258717 0.99
9 404453 13186975 0.99
"""
q = self._query_templates['table']['sample'].format(table=self.name, n=n)
return pd.io.sql.read_sql(q, self._con)
@property
def count(self):
"""Return total of rows from table."""
return len(self.all())
class TableSet(object):
"""
Set of Tables. Used for displaying search results in terminal/ipython notebook.
"""
def __init__(self, tables):
for tbl in tables:
setattr(self, tbl.name, tbl)
self.tables = tables
def __getitem__(self, i):
return self.tables[i]
def _tablify(self):
tbl = PrettyTable(["Table", "Columns"])
tbl.align["Table"] = "l"
tbl.align["Columns"] = "l"
for table in self.tables:
column_names = [col.name for col in table._columns]
column_names = ", ".join(column_names)
pretty_column_names = ""
for i in range(0, len(column_names), 80):
pretty_column_names += column_names[i:(i+80)] + "\n"
pretty_column_names = pretty_column_names.strip()
tbl.add_row([table.name, pretty_column_names])
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
def __len__(self):
return len(self.tables)
class ColumnSet(object):
"""
Set of Columns. Used for displaying search results in terminal/ipython
notebook.
"""
def __init__(self, columns):
self.columns = columns
def __getitem__(self, i):
return self.columns[i]
def _tablify(self):
tbl = PrettyTable(["Table", "Column Name", "Type"])
tbl.align["Table"] = "l"
tbl.align["Column"] = "l"
tbl.align["Type"] = "l"
for col in self.columns:
tbl.add_row([col.table, col.name, col.type])
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
class S3(object):
"""
Simple object for storing AWS credentials
"""
def __init__(self, access_key, secret_key, profile=None):
if profile:
self.load_credentials(profile)
else:
self.access_key = access_key
self.secret_key = secret_key
def save_credentials(self, profile):
"""
Saves credentials to a dotfile so you can grab them later.
Parameters
----------
profile: str
name for your profile (i.e. "dev", "prod")
"""
home = os.path.expanduser("~")
filename = os.path.join(home, ".db.py_s3_" + profile)
creds = {
"access_key": self.access_key,
"secret_key": self.secret_key
}
with open(filename, 'wb') as f:
data = json.dumps(creds)
try:
f.write(base64.encodestring(data))
except TypeError:
f.write(base64.encodestring(bytes(data, 'utf-8')))
def load_credentials(self, profile):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_s3_{profile_name} and are a base64 encoded JSON file. This is
not to say this is a secure way to store sensitive data, but it will
probably stop your little sister from spinning up EC2 instances.
Parameters
----------
profile: str
identifier/name for your database (i.e. "dev", "prod")
"""
user = os.path.expanduser("~")
f = os.path.join(user, ".db.py_s3_" + profile)
if os.path.exists(f):
creds = json.loads(base64.decodestring(open(f, 'rb').read()).encode('utf-8'))
if 'access_key' not in creds:
raise Exception("`access_key` not found in s3 profile '{0}'".format(profile))
self.access_key = creds['access_key']
if 'secret_key' not in creds:
raise Exception("`secret_key` not found in s3 profile '{0}'".format(profile))
self.secret_key = creds['secret_key']
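# Illustrative usage sketch (not part of the original module; the key values and
# profile name below are placeholders): credentials saved under a profile can be
# reloaded later by name.
#   s3 = S3("AKIAEXAMPLE", "secret-example")
#   s3.save_credentials("dev")                 # writes base64-encoded JSON to ~/.db.py_s3_dev
#   s3_again = S3(None, None, profile="dev")   # load_credentials("dev") restores both keys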
class DB(object):
"""
Utility for exploring and querying a database.
Parameters
----------
username: str
Your username for the database
password: str
Your password for the database
hostname: str
Hostname your database is running on (i.e. "localhost", "10.20.1.248")
port: int
Port the database is running on. Defaults to the default port for the db type.
postgres: 5432
redshift: 5439
mysql: 3306
sqlite: n/a
mssql: 1433
filename: str
path to sqlite database
dbname: str
Name of the database
schemas: list
List of schemas to include. Defaults to all.
profile: str
Preconfigured database credentials / profile for how you like your queries
exclude_system_tables: bool
Whether or not to include "system" tables (the ones that the database needs
in order to operate). This includes things like schema definitions. Most of
you probably don't need this, but if you're a db admin you might actually
want to query the system tables.
limit: int, None
Default number of records to return in a query. This is used by the DB.query
method. You can override it by adding limit={X} to the `query` method, or
by passing an argument to `DB()`. None indicates that there will be no
limit (That's right, you'll be limitless. Bradley Cooper style.)
keys_per_column: int, None
Default number of keys to display in the foreign and reference keys.
This is used to control the rendering of PrettyTable a bit. None means
that you'll have verrrrrrrry wide columns in some cases.
driver: str, None
Driver for mssql/pyodbc connections.
Examples
--------
db = DB(dbname="AdventureWorks2012", dbtype="mssql", driver="{FreeTDS}")
from db import DB
try:
__import__('imp').find_module('psycopg2')
db = DB(username="kermit", password="ilikeflies", hostname="themuppets.com", port=5432, dbname="muppets", dbtype="postgres")
db = DB(username="dev", hostname="localhost", port=5432, dbname="devdb", dbtype="postgres")
db = DB(username="fozzybear", password="wakawakawaka", hostname="ec2.523.24.131", port=5432, dbname="muppets_redshift", dbtype="redshift")
except ImportError:
pass
try:
__import__('imp').find_module('pymysql')
db = DB(username="root", hostname="localhost", dbname="employees", dbtype="mysql")
db = DB(filename="/path/to/mydb.sqlite", dbtype="sqlite")
except ImportError:
pass
"""
def __init__(self, username=None, password=None, hostname="localhost",
port=None, filename=None, dbname=None, dbtype=None, schemas=None,
profile="default", exclude_system_tables=True, limit=1000,
keys_per_column=None, driver=None):
if port is None:
if dbtype=="postgres":
port = 5432
elif dbtype=="redshift":
port = 5439
elif dbtype=="mysql":
port = 3306
elif dbtype=="sqlite":
port = None
elif dbtype=="mssql":
port = 1433
elif profile is not None:
pass
else:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
if not dbtype in ("sqlite", "mssql") and username is None:
self.load_credentials(profile)
elif dbtype=="sqlite" and filename is None:
self.load_credentials(profile)
else:
self.username = username
self.password = password
self.hostname = hostname
self.port = port
self.filename = filename
self.dbname = dbname
self.dbtype = dbtype
self.schemas = schemas
self.limit = limit
self.keys_per_column = keys_per_column
self.driver = driver
if self.dbtype is None:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
self._query_templates = queries_templates.get(self.dbtype).queries
if self.dbtype=="postgres" or self.dbtype=="redshift":
if not HAS_PG:
raise Exception("Couldn't find psycopg2 library. Please ensure it is installed")
self.con = pg.connect(user=self.username, password=self.password,
host=self.hostname, port=self.port, dbname=self.dbname)
self.cur = self.con.cursor()
elif self.dbtype=="sqlite":
if not HAS_SQLITE:
raise Exception("Couldn't find sqlite library. Please ensure it is installed")
self.con = sqlite.connect(self.filename)
self.cur = self.con.cursor()
self._create_sqlite_metatable()
elif self.dbtype=="mysql":
if not HAS_MYSQL:
raise Exception("Couldn't find MySQLdb or pymysql library. Please ensure it is installed")
creds = {}
for arg in ["username", "password", "hostname", "port", "dbname"]:
if getattr(self, arg):
value = getattr(self, arg)
if arg=="username":
arg = "user"
elif arg=="password":
arg = "passwd"
elif arg=="dbname":
arg = "db"
elif arg=="hostname":
arg = "host"
creds[arg] = value
self.con = mysql_connect(**creds)
self.con.autocommit(True)
self.cur = self.con.cursor()
elif self.dbtype=="mssql":
if not HAS_ODBC and not HAS_PYMSSQL:
raise Exception("Couldn't find pyodbc or pymssql libraries. Please ensure one of them is installed")
if HAS_ODBC:
base_con = "Driver={driver};Server={server};Database={database};".format(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
database=self.dbname or ''
)
conn_str = ((self.username and self.password) and "{}{}".format(
base_con,
"User Id={username};Password={password};".format(
username=self.username,
password=self.password
)
) or "{}{}".format(base_con, "Trusted_Connection=Yes;"))
try:
self.con = pyo.connect(conn_str)
self.cur = self.con.cursor()
except Exception:
self.con = pyo.connect(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
port=self.port,
database=self.dbname or '',
uid=self.username,
pwd=self.password)
self.cur = self.con.cursor()
elif HAS_PYMSSQL:
if hasattr(self, 'port'):
hostname = '{0}:{1}'.format(self.hostname, self.port)
else:
hostname = self.hostname
self.con = pymssql.connect(host=hostname,
user=self.username,
password=self.password,
database=self.dbname)
self.cur = self.con.cursor()
self._tables = TableSet([])
self._exclude_system_tables = exclude_system_tables
self.handlebars = pybars.Compiler()
@property
def tables(self):
"""A lazy loaded reference to the table metadata for the DB."""
if len(self._tables) == 0:
self.refresh_schema(self._exclude_system_tables)
return self._tables
def __str__(self):
return "DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}".format(
dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname)
def __repr__(self):
return self.__str__()
def __delete__(self):
del self.cur
del self.con
def load_credentials(self, profile="default"):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not
to say this is a secure way to store sensitive data, but it will probably
stop your little sister from stealing your passwords.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
"""
user = os.path.expanduser("~")
f = os.path.join(user, ".db.py_" + profile)
if os.path.exists(f):
raw_creds = open(f, 'rb').read()
raw_creds = base64.decodestring(raw_creds).decode('utf-8')
creds = json.loads(raw_creds)
self.username = creds.get('username')
self.password = creds.get('password')
self.hostname = creds.get('hostname')
self.port = creds.get('port')
self.filename = creds.get('filename')
self.dbname = creds.get('dbname')
self.dbtype = creds.get('dbtype')
self.schemas = creds.get('schemas')
self.limit = creds.get('limit')
self.keys_per_column = creds.get('keys_per_column')
else:
raise Exception("Credentials not configured!")
def save_credentials(self, profile="default"):
"""
Save your database credentials so you don't have to save them in script.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
from db import DB
import pymysql
db = DB(username="hank", password="foo", hostname="prod.mardukas.com", dbname="bar", dbtype="mysql")
db.save_credentials(profile="production")
db = DB(username="hank", password="foo", hostname="staging.mardukas.com", dbname="bar", dbtype="mysql")
db.save_credentials(profile="staging")
db = DB(profile="staging")
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.save_credentials(profile='test')
"""
if self.filename:
db_filename = os.path.join(os.getcwd(), self.filename)
else:
db_filename = None
user = os.path.expanduser("~")
#if not os.path.exists(user,".db.py_"):
# os.makedirs(user,".db.py_")
dotfile = os.path.join(user, ".db.py_" + profile)
creds = {
"username": self.username,
"password": self.password,
"hostname": self.hostname,
"port": self.port,
"filename": db_filename,
"dbname": self.dbname,
"dbtype": self.dbtype,
"schemas": self.schemas,
"limit": self.limit,
"keys_per_column": self.keys_per_column,
}
with open(dotfile, 'wb') as f:
data = json.dumps(creds)
try:
f.write(base64.encodestring(data))
except TypeError:
f.write(base64.encodestring(bytes(data, 'utf-8')))
def find_table(self, search):
"""
Aggressively search through your database's schema for a table.
Parameters
-----------
search: str
glob pattern for what you're looking for
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.find_table("A*")
+--------+--------------------------+
| Table | Columns |
+--------+--------------------------+
| Album | AlbumId, Title, ArtistId |
| Artist | ArtistId, Name |
+--------+--------------------------+
>>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
>>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
>>> results = db.find_table("*Invoice*") # returns all tables containing trans
>>> results = db.find_table("*") # returns everything
"""
tables = []
for table in self.tables:
if glob.fnmatch.fnmatch(table.name, search):
tables.append(table)
return TableSet(tables)
def find_column(self, search, data_type=None):
"""
Aggressively search through your database's schema for a column.
Parameters
-----------
search: str
glob pattern for what you're looking for
data_type: str, list
(optional) specify which data type(s) you want to return
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.find_column("Name").columns)
5
>>> len(db.find_column("*Id").columns)
20
>>> len(db.find_column("*Address*").columns)
3
>>> len(db.find_column("*Address*", data_type="NVARCHAR(70)").columns)
3
>>> len(db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]).columns)
17
-= Should sort in some way for all those doctests to be viable...
-= if not, there's always a random issue where rows are not in the same order, making doctest fail.
db.find_column("Name") # returns all columns named "Name"
+-----------+-------------+---------------+
| Table | Column Name | Type |
+-----------+-------------+---------------+
| Artist | Name | NVARCHAR(120) |
| Genre | Name | NVARCHAR(120) |
| MediaType | Name | NVARCHAR(120) |
| Playlist | Name | NVARCHAR(120) |
| Track | Name | NVARCHAR(200) |
+-----------+-------------+---------------+
db.find_column("*Id") # returns all columns ending w/ Id
+---------------+---------------+---------+
| Table | Column Name | Type |
+---------------+---------------+---------+
| Album | AlbumId | INTEGER |
| Album | ArtistId | INTEGER |
| Artist | ArtistId | INTEGER |
| Customer | SupportRepId | INTEGER |
| Customer | CustomerId | INTEGER |
| Employee | EmployeeId | INTEGER |
| Genre | GenreId | INTEGER |
| Invoice | InvoiceId | INTEGER |
| Invoice | CustomerId | INTEGER |
| InvoiceLine | TrackId | INTEGER |
| InvoiceLine | InvoiceLineId | INTEGER |
| InvoiceLine | InvoiceId | INTEGER |
| MediaType | MediaTypeId | INTEGER |
| Playlist | PlaylistId | INTEGER |
| PlaylistTrack | TrackId | INTEGER |
| PlaylistTrack | PlaylistId | INTEGER |
| Track | TrackId | INTEGER |
| Track | AlbumId | INTEGER |
| Track | MediaTypeId | INTEGER |
| Track | GenreId | INTEGER |
+---------------+---------------+---------+
db.find_column("*Address*") # returns all columns containing Address
+----------+----------------+--------------+
| Table | Column Name | Type |
+----------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Employee | Address | NVARCHAR(70) |
| Invoice | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
+----------+----------------+--------------+
| Table | Column Name | Type |
+----------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Employee | Address | NVARCHAR(70) |
| Invoice | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
+-------------+----------------+--------------+
| Table | Column Name | Type |
+-------------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Customer | SupportRepId | INTEGER |
| Customer | CustomerId | INTEGER |
| Employee | ReportsTo | INTEGER |
| Employee | EmployeeId | INTEGER |
| Employee | Address | NVARCHAR(70) |
| Genre | GenreId | INTEGER |
| Invoice | InvoiceId | INTEGER |
| Invoice | CustomerId | INTEGER |
| Invoice | BillingAddress | NVARCHAR(70) |
| InvoiceLine | InvoiceLineId | INTEGER |
| InvoiceLine | InvoiceId | INTEGER |
| MediaType | MediaTypeId | INTEGER |
| Track | MediaTypeId | INTEGER |
| Track | Milliseconds | INTEGER |
| Track | GenreId | INTEGER |
| Track | Bytes | INTEGER |
+-------------+----------------+--------------+
"""
if isinstance(data_type, str):
data_type = [data_type]
cols = []
for table in self.tables:
for col in vars(table):
if glob.fnmatch.fnmatch(col, search):
if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:
continue
if isinstance(getattr(table, col), Column):
cols.append(getattr(table, col))
return ColumnSet(cols)
def _assign_limit(self, q, limit=1000):
# postgres, mysql, & sqlite
if self.dbtype in ["postgres", "redshift", "sqlite", "mysql"]:
if limit:
q = q.rstrip().rstrip(";")
q = "select * from ({q}) q limit {limit}".format(q=q, limit=limit)
return q
# mssql
else:
if limit:
q = "select top {limit} * from ({q}) q".format(limit=limit, q=q)
return q
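# Illustrative behaviour of the limit wrapper above (queries are hypothetical):
#   self._assign_limit("select * from Track;", 10)
#     postgres/redshift/sqlite/mysql -> "select * from (select * from Track) q limit 10"
#   self._assign_limit("select * from Track", 10)
#     mssql                          -> "select top 10 * from (select * from Track) q"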
def _apply_handlebars(self, q, data, union=True):
if (sys.version_info < (3, 0)):
q = unicode(q)
template = self.handlebars.compile(q)
if isinstance(data, list):
query = [template(item) for item in data]
query = [str(item) for item in query]
if union==True:
query = "\nUNION ALL".join(query)
else:
query = "\n".join(query)
elif isinstance(data, dict):
query = template(data)
query = str(query)
else:
return q
return query
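# Illustrative rendering of the handlebars helper above (the template text is
# hypothetical, not one of the library's own queries):
#   q = "\nSELECT COUNT(*) FROM {{ name }}"
#   data = [{"name": "Album"}, {"name": "Track"}]
#   self._apply_handlebars(q, data, union=True)
#   -> "\nSELECT COUNT(*) FROM Album\nUNION ALL\nSELECT COUNT(*) FROM Track"
# With union=False the rendered statements are joined by newlines instead, and a
# single dict renders to a single statement.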
def query(self, q, data=None, union=True, limit=None):
"""
Query your database with a raw string.
Parameters
----------
q: str
Query string to execute
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not "UNION ALL" handlebars templates. This will return
any handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
db.query("select * from Track").head(2)
TrackId Name AlbumId MediaTypeId \\\r
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
<BLANKLINE>
GenreId Composer Milliseconds Bytes \\\r
0 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334
1 1 None 342562 5510424
<BLANKLINE>
UnitPrice
0 0.99
1 0.99
db.query("select * from Track", limit=10)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
6 7 Let's Get It Up 1 1
7 8 Inject The Venom 1 1
8 9 Snowballed 1 1
9 10 Evil Walks 1 1
GenreId Composer Milliseconds \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719
1 1 None 342562
2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619
3 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 Angus Young, Malcolm Young, Brian Johnson 205662
6 1 Angus Young, Malcolm Young, Brian Johnson 233926
7 1 Angus Young, Malcolm Young, Brian Johnson 210834
8 1 Angus Young, Malcolm Young, Brian Johnson 203102
9 1 Angus Young, Malcolm Young, Brian Johnson 263497
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
6 7636561 0.99
7 6852860 0.99
8 6599424 0.99
9 8611245 0.99
>>> q = '''
... SELECT
... a.Title,
... t.Name,
... t.UnitPrice
... FROM
... Album a
... INNER JOIN
... Track t
... on a.AlbumId = t.AlbumId;
... '''
>>> len(db.query(q))
3503
db.query(q, limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
>>> template = '''
... SELECT
... '{{ name }}' as table_name,
... COUNT(*) as cnt
... FROM
... {{ name }}
... GROUP BY
... table_name
... '''
>>> data = [
... {"name": "Album"},
... {"name": "Artist"},
... {"name": "Track"}
... ]
>>>
db.query(q, data=data)
table_name cnt
0 Album 347
1 Artist 275
2 Track 3503
>>> q = '''
... SELECT
... {{#cols}}
... {{#if @last}}
... {{ . }}
... {{else}}
... {{ . }} ,
... {{/if}}
... {{/cols}}
... FROM
... Album;
... '''
>>> data = {"cols": ["AlbumId", "Title", "ArtistId"]}
>>> len(db.query(q, data=data, union=False))
347
db.query(q, data=data, union=False)
AlbumId Title ArtistId
0 1 For Those About To Rock We Salute You 1
1 2 Balls to the Wall 2
2 3 Restless and Wild 2
3 4 Let There Be Rock 1
4 5 Big Ones 3
"""
if data:
q = self._apply_handlebars(q, data, union)
#if limit==None:
# pass
#else:
if limit:
q = self._assign_limit(q, limit)
return pd.io.sql.read_sql(q, self.con)
def query_from_file(self, filename, data=None, union=True, limit=None):
"""
Query your database from a file.
Parameters
----------
filename: str
A SQL script
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not "UNION ALL" handlebars templates. This will return
any handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> q = '''
... SELECT
... a.Title,
... t.Name,
... t.UnitPrice
... FROM
... Album a
... INNER JOIN
... Track t
... on a.AlbumId = t.AlbumId;
... '''
>>> with open("db/tests/myscript.sql", "w") as f:
... f.write(q)
109
>>> len(db.query_from_file("db/tests/myscript.sql", limit=10))
10
db.query_from_file("db/tests/myscript.sql", limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
"""
with open(filename) as fp:
q = fp.read()
return self.query(q, data=data, union=union, limit=limit)
def _create_sqlite_metatable(self):
"""
SQLite doesn't come with any metatables (at least ones that fit into our
framework), so we're going to create them.
"""
sys.stderr.write("Indexing schema. This will take a second...")
rows_to_insert = []
tables = [row[0] for row in self.cur.execute("select name from sqlite_master where type='table';")]
for table in tables:
for row in self.cur.execute("pragma table_info({0})".format(table)):
rows_to_insert.append((table, row[1], row[2]))
# store table and column names
self.cur.execute("drop table if exists tmp_dbpy_schema;")
self.cur.execute("create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);")
for row in rows_to_insert:
self.cur.execute("insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');".format(*row))
self.cur.execute("SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';")
# find foreign keys
self.cur.execute("drop table if exists tmp_dbpy_foreign_keys;")
self.cur.execute("create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);")
foreign_keys = []
self.cur.execute("SELECT name, sql FROM sqlite_master ;")
for (table_name, sql) in self.cur:
rgx = "FOREIGN KEY \(\[(.*)\]\) REFERENCES \[(.*)\] \(\[(.*)\]\)"
if sql is None:
continue
for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql):
foreign_keys.append((table_name, column_name, foreign_table, foreign_key))
for row in foreign_keys:
sql_insert = "insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');"
self.cur.execute(sql_insert.format(*row))
self.con.commit()
sys.stderr.write("finished!\n")
def refresh_schema(self, exclude_system_tables=True):
"""
Pulls your database's schema again and looks for any new tables and
columns.
"""
sys.stderr.write("Refreshing schema. Please wait...")
if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in self._query_templates['system']:
schemas_str = ','.join([repr(schema) for schema in self.schemas])
q = self._query_templates['system']['schema_specified'] % schemas_str
elif exclude_system_tables==True:
q = self._query_templates['system']['schema_no_system']
else:
q = self._query_templates['system']['schema_with_system']
self.cur.execute(q)
tables = {}
for (table_name, column_name, data_type) in self.cur:
if table_name not in tables:
tables[table_name] = []
tables[table_name].append(Column(self.con, self._query_templates, table_name, column_name, data_type, self.keys_per_column))
self._tables = TableSet([Table(self.con, self._query_templates, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())])
sys.stderr.write("done!\n")
def _try_command(self, cmd):
try:
self.cur.execute(cmd)
except Exception as e:
print ("Error executing command:")
print ("\t '{0}'".format(cmd))
print ("Exception: {0}".format(e))
self.con.rollback()
def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,
AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,
print_sql=False, bucket_location=None, s3_bucket=None):
"""
Upload a dataframe to redshift via s3.
Parameters
----------
name: str
name for your shiny new table
df: DataFrame
data frame you want to save to the db
drop_if_exists: bool (False)
whether you'd like to drop the table if it already exists
chunk_size: int (10000)
Number of DataFrame chunks to upload and COPY from S3. Upload speed
is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes
have 2 slices per node, so if running 2 nodes you will want
chunk_size=4, 8, etc
AWS_ACCESS_KEY: str
your aws access key. if this is None, the function will try
and grab AWS_ACCESS_KEY from your environment variables
AWS_SECRET_KEY: str
your aws secret key. if this is None, the function will try
and grab AWS_SECRET_KEY from your environment variables
s3: S3
alternative to using keys, you can use an S3 object
print_sql: bool (False)
option for printing sql statement that will be executed
bucket_location: boto.s3.connection.Location
a specific AWS location in which to create the temporary transfer s3
bucket. This should match your redshift cluster's region.
Examples
--------
"""
if self.dbtype!="redshift":
raise Exception("Sorry, feature only available for redshift.")
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import Location
# if boto is present, set the bucket_location to default.
# we can't do this in the function definition because we're
# lazily importing boto only if necessary here.
if bucket_location is None:
bucket_location = Location.DEFAULT
except ImportError:
raise Exception("Couldn't find boto library. Please ensure it is installed")
if s3 is not None:
AWS_ACCESS_KEY = s3.access_key
AWS_SECRET_KEY = s3.secret_key
if AWS_ACCESS_KEY is None:
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
if AWS_SECRET_KEY is None:
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
if AWS_ACCESS_KEY is None:
raise Exception("Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`")
if AWS_SECRET_KEY is None:
raise Exception("Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`")
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
#this way users with permission on specific buckets can use this feature
bucket_name = "dbpy-{0}".format(uuid.uuid4())
if s3_bucket:
bucket = conn.get_bucket(s3_bucket)
bucket_name = s3_bucket
else:
bucket = conn.create_bucket(bucket_name, location=bucket_location)
# we're going to chunk the file into pieces. according to amazon, this is
# much faster when it comes time to run the \COPY statement.
#
# see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
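# Illustrative chunking (sizes are hypothetical): with len(df)=25000 and
# chunk_size=10000, chunks = [0, 10000, 20000] and the uploaded keys are
# data-0-10000.csv.gz, data-10000-20000.csv.gz, data-20000-30000.csv.gz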
sys.stderr.write("Transfering {0} to s3 in chunks".format(name))
len_df = len(df)
chunks = range(0, len_df, chunk_size)
def upload_chunk(i):
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
chunk = df[i:(i+chunk_size)]
k = Key(bucket)
k.key = 'data-%d-%d.csv.gz' % (i, i + chunk_size)
k.set_metadata('parent', 'db.py')
out = StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(chunk.to_csv(index=False, encoding='utf-8'))
k.set_contents_from_string(out.getvalue())
sys.stderr.write(".")
return i
threads = []
for i in chunks:
t = threading.Thread(target=upload_chunk, args=(i, ))
t.start()
threads.append(t)
# join all threads
for t in threads:
t.join()
sys.stderr.write("done\n")
if drop_if_exists:
sql = "DROP TABLE IF EXISTS {0};".format(name)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
# generate schema from pandas and then adapt for redshift
sql = pd.io.sql.get_schema(df, name)
# defaults to using SQLite format. need to convert it to Postgres
sql = sql.replace("[", "").replace("]", "")
# we'll create the table ONLY if it doesn't exist
sql = sql.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
# perform the \COPY here. the s3 argument is a prefix, so it'll pick up
# all of the data*.gz files we've created
sys.stderr.write("Copying data from s3 to redshfit...")
sql = """
copy {name} from 's3://{bucket_name}/data'
credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'
CSV IGNOREHEADER as 1 GZIP;
""".format(name=name, bucket_name=bucket_name,
AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
sys.stderr.write("done!\n")
# tear down the bucket
sys.stderr.write("Tearing down bucket...")
for key in bucket.list():
key.delete()
if not s3_bucket:
conn.delete_bucket(bucket_name)
sys.stderr.write("done!")
def list_profiles():
"""
Lists all of the database profiles available
Examples
--------
No doctest, covered by unittest
list_profiles()
{'demo': {u'dbname': None,
u'dbtype': u'sqlite',
u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',
u'hostname': u'localhost',
u'password': None,
u'port': 5432,
u'username': None},
'muppets': {u'dbname': u'muppetdb',
u'dbtype': u'postgres',
u'filename': None,
u'hostname': u'muppets.yhathq.com',
u'password': None,
u'port': 5432,
u'username': u'kermit'}}
"""
profiles = {}
user = os.path.expanduser("~")
for f in os.listdir(user):
if f.startswith(".db.py_"):
profilePath = os.path.join(user, f)
profile = json.loads(base64.decodestring(open(profilePath,'rb').read()).decode('utf-8'))
profiles[f[7:]] = profile
return profiles
def remove_profile(name, s3=False):
"""
Removes a profile from your config
"""
user = os.path.expanduser("~")
if s3==True:
f = os.path.join(user, ".db.py_s3_" + name)
else:
f = os.path.join(user, ".db.py_" + name)
try:
try:
open(f)
except IOError:
raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
os.remove(f)
except Exception as e:
raise Exception("Could not remove profile {0}! Excpetion: {1}".format(name, e))
def DemoDB(keys_per_column=None):
"""
Provides an instance of DB that hooks up to the Chinook DB
See http://chinookdatabase.codeplex.com/ for more info.
"""
_ROOT = os.path.abspath(os.path.dirname(__file__))
chinook = os.path.join(_ROOT, 'data', "chinook.sqlite")
return DB(filename=chinook, dbtype="sqlite", keys_per_column=keys_per_column)
| bsd-2-clause |
ndevenish/simplehistogram | simplehist/hists.py | 1 | 6669 | # coding: utf-8
"""
hists.py
Copyright (c) 2014 Nicholas Devenish <[email protected]>
An easy, quick, lightweight histogram class based on ndarray
Initialise with bin edges:
>>> a = Hist([0, 1, 2, 3])
>>> len(a)
3
>>> a.bins
array([0, 1, 2, 3])
Optionally include data:
>>> Hist([0, 1, 2, 3], data=[1, 0.2, 3])
Hist([0, 1, 2, 3], data=[ 1. , 0.2, 3. ])
Or just specify the blank data type:
>>> a = Hist([0, 1, 2, 3], dtype=int)
>>> a
Hist([0, 1, 2, 3], data=[0, 0, 0])
You can do any normal numpy arithmetic operations:
>>> a = Hist([0, 1, 2, 3], data=[1, 0.2, 3])
>>> b = a + a
>>> b -= a
>>> all(a == b)
True
And you can fill bins from values:
>>> a = Hist([0,1,2,3])
>>> a.fill(1.4, 3)
>>> a
Hist([0, 1, 2, 3], data=[ 0., 3., 0.])
Or from arrays:
>>> a = Hist([0,1,2,3])
>>> a.fill([1.4, 2.4], weights=[1, 2])
>>> a
Hist([0, 1, 2, 3], data=[ 0., 1., 2.])
If you use pyROOT, you can convert from 1D histograms:
>>> type(source)
<class 'ROOT.TH1D'>
>>> convert = ashist(source)
>>> type(convert)
<class 'simplehist.hists.Hist'>
Or conversion from custom types - see simplehist.converter for
implementation details.
You can also draw histograms, using any of the options
that can be passed to matplotlib.pyplot.plot:
>>> hist_object.draw_hist(lw=2)
"""
import sys
import numpy
# A numpy array with bins, and constraints on those bins
class Hist(numpy.ndarray):
def __new__(cls, bins, data=None, **kwargs):
# If bins contains items that are list-like then it is probably multidim
if isinstance(bins[0], (tuple, list)):
# It must be multi-dimension...
bins = tuple(numpy.asarray(x) for x in bins)
ndims = len(bins)
shape = tuple(len(x)-1 for x in bins)
else:
# Just a single dimension
bins = numpy.asarray(bins)
assert bins.ndim == 1
ndims = 1
shape = (len(bins)-1,)
# Create or validate the data shape
if data is None:
# data = numpy.zeros(tuple(x-1 for x in bins.shape), **kwargs)
data = numpy.zeros(shape, **kwargs)
else:
data = numpy.asarray(data, **kwargs)
# Same dimensions and shape-1
assert ndims == data.ndim
if ndims == 1:
assert all(x == len(y)-1 for x, y in zip(data.shape, [bins]))
else:
assert all(x == len(y)-1 for x, y in zip(data.shape, bins))
# Cast from our data array
obj = data.view(cls)
obj._bins = bins
return obj
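# Illustrative sketch (not from the original docstring): a multi-dimensional
# histogram is built by passing one bin-edge sequence per axis, e.g.
#   h2 = Hist(([0, 1, 2], [0, 10, 20, 30]))   # data shape (2, 3), one edge array kept per axis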
def __array_finalize__(self, obj):
# Since always creating as an alternate, this should never happen
assert obj is not None
# Other should always have a _bins object
self._bins = getattr(obj,"_bins",None)
def __array_wrap__(self,obj,context=None):
# if obj.ndim == 0 and obj.size == 1:
# return obj.item()
# Don't wrap as a hist if the shape changed - we have no idea how it did so
if not obj.shape == self.shape:
return obj
return super(Hist,self).__array_wrap__(obj,context)
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, value):
value = numpy.asarray(value)
assert value.ndim == self.ndim
assert all(x == y-1 for x, y in zip(self.shape, value.shape))
self._bins = value
def __getitem__(self, index):
"""Return a value, or a subhist from a slice.
Getting singular indices just returns the values, whilst slices return
subhists, with applicable bins."""
return super(Hist, self).__getitem__(index)
if isinstance(index, tuple) and self.ndim == 1:
binSel = []
# Build a new tuple for each of the entries
for selection in index:
if selection is Ellipsis:
binSel.append(Ellipsis)
elif isinstance(selection, slice):
# Stepping really doesn't make much sense with bins
assert selection.step is None or selection.step == 1
if selection.stop is not None:
binSel.append(slice(selection.start, min(sys.maxint,selection.stop+1)))
else:
binSel.append(slice(selection.start, None))
elif isinstance(selection, int):
binSel.append(slice(selection, selection+1))
else:
# Throw away the hist information as we don't understand the request
return super(Hist, self).__getitem__(index).view(numpy.ndarray)
#assert False
# Build a new histogram with these bins
ret = super(Hist,self).__getitem__(index).view(Hist)
# If this gave us a hist..
if hasattr(ret, "_bins"):
ret._bins = self._bins.__getitem__(tuple(binSel))
return ret
else:
return super(Hist, self).__getitem__(index)
def __getslice__(self, i, j):
return self.__getitem__((slice(i,j),))
def __repr__(self):
# if numpy.all(self == 0):
# # Bin-only output
# return "{}(bins={})".format(type(self).__name__, numpy.array_repr(self._bins))
# else:
if self.ndim == 1:
return "{}({}, data={})".format(type(self).__name__,
numpy.array_repr(self._bins)[len("array("):-1],
numpy.array_repr(self)[len(type(self).__name__)+1:-1])
else:
return "{}(({}), data={})".format(type(self).__name__,
",".join([numpy.array_repr(x)[6:-1] for x in self._bins]),
numpy.array_repr(self)[len(type(self).__name__)+1:-1])
def fill(self, values, weights=None):
values = numpy.asarray(values)
if weights is not None:
weights = numpy.asarray(weights)
else:
weights = numpy.ones(values.shape)
assert values.shape == weights.shape
# Promote scalars, if required
if values.ndim == 0:
values = values[numpy.newaxis]
weights = weights[numpy.newaxis]
bins = numpy.digitize(values, self._bins)
newValues = numpy.zeros(self.shape)
# Now fill all the bins
for _bin, weight in zip(bins, weights):
if _bin < 1 or _bin > len(newValues):
continue
newValues[_bin-1] += weight
# add to the current instance
self += newValues
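# Sketch of the binning logic above (values are illustrative):
#   numpy.digitize([1.4, 2.4], [0, 1, 2, 3]) -> array([2, 3]), so the weights are
#   added to newValues[1] and newValues[2] (digitize index minus one); values that
#   fall outside the bin edges get an out-of-range index and are skipped.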
def draw_hist(self, **kwargs):
assert self.ndim == 1
import matplotlib.pyplot as plt
x = numpy.zeros(len(self)*2)
x[0::2] = self.bins[:-1]
x[1::2] = self.bins[1:]
y = numpy.array(numpy.repeat(self,2))
# import pdb
# pdb.set_trace()
return plt.plot(x,y,**kwargs)
def pcolor(self, *args, **kwargs):
assert self.ndim == 2
import matplotlib.pyplot as plt
plt.pcolor(self.bins[0], self.bins[1], self.T, *args, **kwargs)
def pcolormesh(self, *args, **kwargs):
assert self.ndim == 2
import matplotlib.pyplot as plt
plt.pcolormesh(self.bins[0], self.bins[1], self.T, *args, **kwargs) | mit |
rsteed11/GAT | gat/core/sna/pmesii.py | 1 | 5988 | # import packages
import networkx as nx
import numpy as np
import pandas as pd
# import csv or xlsx into pandas
dataFrameList = (pd.read_excel("World Bank Data Iran.xlsx"),
pd.read_excel("CIRI Iran.xlsx"),
pd.read_excel("DPI Iran.xlsx"))
df = pd.concat(dataFrameList)
headerList = df.columns.values.tolist()
iran = df.to_dict(orient='index')
# initialize iterative lists
attrList = []
edgeList = []
initList = []
y = len(iran)
# iterate through data set and assign PMESII points to weighted edge lists
for x in range(0, y):
edgeList.append((iran[x]['Variable Code'], iran[x]['Domain']))
edgeList.append((iran[x]['Domain'], 'PMESII Resources'))
for y in range(1975, 1979):
if iran[x][y] != str(iran[x][y]):
initList.append(iran[x][y])
attrList.append((iran[x]['Variable Code'], iran[x]['Domain'], np.mean(initList)))
initList = []
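# Illustrative shape of the lists built above (the variable code and mean are
# placeholders, not values from the Iran workbooks):
#   edgeList entries: ('NY.GDP.MKTP.CD', 'Economic'), ('Economic', 'PMESII Resources')
#   attrList entries: ('NY.GDP.MKTP.CD', 'Economic', 1.23e9)  # mean of the 1975-1978 values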
# create network and graph
G = nx.Graph()
G.add_weighted_edges_from(attrList, 'W')
# construct baseline task models over PMESII network
tmEdgeList_1 = [('IC.IMP.TMDC', 'TM.VAL.MMTL.ZS.UN', -G['Economic']['IC.IMP.TMDC']['W']),
('IC.IMP.TMBC', 'TM.VAL.MMTL.ZS.UN', -G['Economic']['IC.IMP.TMBC']['W']),
('IC.IMP.CSDC.CD', 'TM.VAL.MMTL.ZS.UN', -G['Economic']['IC.IMP.CSDC.CD']['W']),
('IC.IMP.CSBC.CD', 'TM.VAL.MMTL.ZS.UN', -G['Economic']['IC.IMP.CSBC.CD']['W']),
('TM.VAL.MMTL.ZS.UN', 'NV.IND.MANF.CD', G['Economic']['TM.VAL.MMTL.ZS.UN']['W']),
('SL.IND.EMPL.ZS', 'NV.IND.MANF.CD', G['Economic']['SL.IND.EMPL.ZS']['W']),
('NV.IND.TOTL.KD.ZG', 'NV.IND.MANF.CD', G['Economic']['NV.IND.TOTL.KD.ZG']['W']),
('SL.TLF.TOTL.IN', 'NV.IND.MANF.CD', G['Economic']['SL.TLF.TOTL.IN']['W']),
('NV.IND.MANF.CD', 'IS.RRS.GOOD.MT.K6', G['Economic']['NV.IND.MANF.CD']['W']),
('BM.GSR.TRAN.ZS', 'IS.RRS.GOOD.MT.K6', G['Economic']['BM.GSR.TRAN.ZS']['W']),
('IS.AIR.GOOD.MT.K1', 'IS.RRS.GOOD.MT.K6', G['Infrastructure']['IS.AIR.GOOD.MT.K1']['W']),
('IS.AIR.DPRT', 'IS.RRS.GOOD.MT.K6', G['Infrastructure']['IS.AIR.DPRT']['W']),
('IS.RRS.GOOD.MT.K6', 'MS.MIL.XPRT.KD', G['Infrastructure']['IS.RRS.GOOD.MT.K6']['W']),
('IC.EXP.DURS', 'MS.MIL.XPRT.KD', -G['Economic']['IC.EXP.DURS']['W']),
('IC.EXP.DOCS', 'MS.MIL.XPRT.KD', -G['Economic']['IC.EXP.DOCS']['W']),
('IC.EXP.COST.CD', 'MS.MIL.XPRT.KD', -G['Economic']['IC.EXP.COST.CD']['W'])]
VC_1 = nx.DiGraph()
VC_1.add_weighted_edges_from(tmEdgeList_1)
aspl_1 = nx.average_shortest_path_length(VC_1, weight='W')
print(aspl_1)
tmEdgeList_2 = [('BX.GRT.TECH.CD.WD', 'BX.GRT.EXTA.CD.WD', G['BX.GRT.TECH.CD.WD']['Economic']['W']),
('SP.POP.TECH.RD.P6', 'BX.GRT.EXTA.CD.WD', G['SP.POP.TECH.RD.P6']['Societal']['W']),
('SP.POP.SCIE.RD.P6', 'BX.GRT.EXTA.CD.WD', G['SP.POP.SCIE.RD.P6']['Societal']['W']),
('BX.GRT.EXTA.CD.WD', 'NV.IND.MANF.CD', G['BX.GRT.EXTA.CD.WD']['Economic']['W']),
('SL.IND.EMPL.ZS', 'NV.IND.MANF.CD', G['SL.IND.EMPL.ZS']['Economic']['W']),
('NV.IND.TOTL.KD.ZG', 'NV.IND.MANF.CD', G['NV.IND.TOTL.KD.ZG']['Economic']['W']),
('SL.TLF.TOTL.IN', 'NV.IND.MANF.CD', G['SL.TLF.TOTL.IN']['Economic']['W']),
('NV.IND.MANF.CD', 'IS.RRS.GOOD.MT.K6', G['NV.IND.MANF.CD']['Economic']['W']),
('BM.GSR.TRAN.ZS', 'IS.RRS.GOOD.MT.K6', G['BM.GSR.TRAN.ZS']['Economic']['W']),
('IS.AIR.GOOD.MT.K1', 'IS.RRS.GOOD.MT.K6', G['IS.AIR.GOOD.MT.K1']['Infrastructure']['W']),
('IS.AIR.DPRT', 'IS.RRS.GOOD.MT.K6', G['IS.AIR.DPRT']['Infrastructure']['W']),
('IS.RRS.GOOD.MT.K6', 'TX.VAL.TECH.CD', G['IS.RRS.GOOD.MT.K6']['Infrastructure']['W']),
('IC.EXP.DURS', 'TX.VAL.TECH.CD', -G['IC.EXP.DURS']['Economic']['W']),
('IC.EXP.DOCS', 'TX.VAL.TECH.CD', -G['IC.EXP.DOCS']['Economic']['W']),
('IC.EXP.COST.CD', 'TX.VAL.TECH.CD', -G['IC.EXP.COST.CD']['Economic']['W'])]
VC_2 = nx.DiGraph()
VC_2.add_weighted_edges_from(tmEdgeList_2)
aspl_2 = nx.average_shortest_path_length(VC_2, weight='W')
print(aspl_2)
tmEdgeList_3 = [('NE.IMP.GNFS.CD', 'TM.VAL.SERV.CD.WT', G['NE.IMP.GNFS.CD']['Economic']['W']),
('BM.GSR.MRCH.CD', 'TM.VAL.SERV.CD.WT', G['BM.GSR.MRCH.CD']['Economic']['W']),
('TM.VAL.SERV.CD.WT', 'NV.IND.MANF.CD', G['TM.VAL.SERV.CD.WT']['Economic']['W']),
('SL.IND.EMPL.ZS', 'NV.IND.MANF.CD', G['SL.IND.EMPL.ZS']['Economic']['W']),
('NV.IND.TOTL.KD.ZG', 'NV.IND.MANF.CD', G['NV.IND.TOTL.KD.ZG']['Economic']['W']),
('SL.TLF.TOTL.IN', 'NV.IND.MANF.CD', G['SL.TLF.TOTL.IN']['Economic']['W']),
('NV.IND.MANF.CD', 'IS.RRS.GOOD.MT.K6', G['NV.IND.MANF.CD']['Economic']['W']),
('BM.GSR.TRAN.ZS', 'IS.RRS.GOOD.MT.K6', G['BM.GSR.TRAN.ZS']['Economic']['W']),
('IS.AIR.GOOD.MT.K1', 'IS.RRS.GOOD.MT.K6', G['IS.AIR.GOOD.MT.K1']['Infrastructure']['W']),
('IS.AIR.DPRT', 'IS.RRS.GOOD.MT.K6', G['IS.AIR.DPRT']['Infrastructure']['W']),
('IS.RRS.GOOD.MT.K6', 'TX.VAL.MANF.ZS.UN', G['IS.RRS.GOOD.MT.K6']['Infrastructure']['W']),
('IC.EXP.DURS', 'TX.VAL.MANF.ZS.UN', -G['IC.EXP.DURS']['Economic']['W']),
('IC.EXP.DOCS', 'TX.VAL.MANF.ZS.UN', -G['IC.EXP.DOCS']['Economic']['W']),
('IC.EXP.COST.CD', 'TX.VAL.MANF.ZS.UN', -G['IC.EXP.COST.CD']['Economic']['W'])]
VC_3 = nx.DiGraph()
VC_3.add_weighted_edges_from(tmEdgeList_3)
aspl_3 = nx.average_shortest_path_length(VC_3, weight='W')
print(aspl_3)
| mit |
MohammedWasim/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
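As a rough sketch, the graph-learning and clustering steps boil down to the
estimator calls made in the code below (``X`` holds the standardized daily
variations)::
    edge_model = covariance.GraphLassoCV()
    edge_model.fit(X)
    _, labels = cluster.affinity_propagation(edge_model.covariance_)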
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from the Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initialized with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
wbinventor/openmc | tests/regression_tests/mgxs_library_no_nuclides/test.py | 4 | 2092 | import hashlib
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
# Generate inputs using parent class routine
super().__init__(*args, **kwargs)
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = False
# Test all MGXS types
self.mgxs_lib.mgxs_types = openmc.mgxs.MGXS_TYPES + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_mgxs_library_no_nuclides():
model = pwr_pin_cell()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
| mit |
mortada/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
        after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
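  # A minimal usage sketch (the column name "row_id" is hypothetical): keep
  # roughly 70% of the rows in the left frame and rebatch both sides to 32:
  #   train_df, test_df = frame.split("row_id", proportion=0.7, batch_size=32)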
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
  def run_one_batch(self):
    """Creates a new `Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
  def run_one_epoch(self):
    """Creates a new `Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
    # there may already be an 'index' column, in which case from_ordereddict
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
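# A minimal usage sketch, not part of the original module: build a DataFrame
# from an in-memory numpy array and pull a single batch. Per the `from_numpy`
# docstring, the resulting frame exposes 'index' and 'value' columns.
if __name__ == "__main__":
  example_frame = TensorFlowDataFrame.from_numpy(
      np.arange(10), batch_size=5, shuffle=False)
  example_batch = example_frame.run_one_batch()
  print(sorted(example_batch.keys()))  # expected: ['index', 'value']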
| apache-2.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 24 | 2349 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(tf.test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(tf.test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mxjl620/scikit-learn | sklearn/tests/test_grid_search.py | 53 | 28730 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
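# A minimal, self-contained sketch of the sampling behaviour exercised above
# (illustrative only; the leading underscore keeps it from being collected as
# a test). With a finite grid and n_iter equal to the grid size,
# ParameterSampler degenerates to iterating the full grid without replacement.
def _example_parameter_sampler():
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    draws = list(ParameterSampler(params, n_iter=6, random_state=0))
    assert len(draws) == 6  # the full grid, each point drawn once
    return draws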
| bsd-3-clause |
larsmans/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 20 | 21057 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
""" Test the pairwise_distance helper function. """
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_parallel():
rng = np.random.RandomState(0)
for func in (np.array, csr_matrix):
X = func(rng.random_sample((5, 4)))
Y = func(rng.random_sample((3, 4)))
S = euclidean_distances(X)
S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=3)
assert_array_almost_equal(S, S2)
S = euclidean_distances(X, Y)
S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=3)
assert_array_almost_equal(S, S2)
def test_pairwise_kernels():
""" Test the pairwise_kernels helper function. """
def callable_rbf_kernel(x, y, **kwds):
""" Callable version of pairwise.rbf_kernel. """
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
""" Test the pairwise_distance helper function. """
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y,
metric=lambda x, y: np.abs(x -y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
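# Note: paired_distances returns one distance per aligned row pair, i.e. the
# diagonal of the full pairwise matrix -- which is what the
# PAIRWISE_DISTANCE_FUNCTIONS cross-check above extracts with np.diag.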
def test_pairwise_distances_argmin_min():
""" Check pairwise minimum distances computation for any metric"""
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
""" Check the pairwise Euclidean distances computation"""
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
""" Check the paired Euclidean distances computation"""
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
""" Check the paired manhattan distances computation"""
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
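# A plain-numpy reference for the kernels exercised above (illustrative
# sketch only, mirroring the explicit loop in test_chi_square_kernel):
# additive_chi2_kernel(x, y) = -sum_i (x_i - y_i)**2 / (x_i + y_i), and
# chi2_kernel exponentiates it as exp(gamma * additive).
def _reference_chi2(x, y, gamma=1.):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    additive = -np.sum((x - y) ** 2 / (x + y))  # assumes x + y has no zeros
    return np.exp(gamma * additive)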
def test_kernel_symmetry():
""" Valid kernels should be symmetric"""
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity():
""" Test the cosine_similarity. """
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
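# The identity checked above in one line (illustrative note): after L2
# normalization the cosine kernel coincides with the plain dot product, i.e.
# cosine_similarity(X, Y) equals np.dot(normalize(X), normalize(Y).T) up to
# floating point error.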
def test_check_dense_matrices():
""" Ensure that pairwise array check works for dense matrices."""
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
""" Ensure that if XA and XB are given correctly, they return as equal."""
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
""" Ensure an error is raised if the dimensions are different. """
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
""" Ensure an error is raised on 1D input arrays. """
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
""" Ensures that checks return valid sparse matrices. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
""" Turns a numpy matrix (any n-dimensional array) into tuples."""
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
""" Ensures that checks return valid tuples. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
""" Ensures that type float32 is preserved. """
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` equals the product of the per-column axis
        lengths (at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution, use its unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
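# A minimal usage sketch of _grid_from_X (illustrative only; the random data
# below is made up). With two columns of continuous values and
# grid_resolution=5, the returned grid is the cartesian product of the two
# per-column axes, i.e. 5 * 5 = 25 rows of 2 values each.
def _example_grid_from_X():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    grid, axes = _grid_from_X(X, grid_resolution=5)
    assert grid.shape == (25, 2) and len(axes) == 2
    return grid, axes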
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
nlholdem/icodoom | ICO1/deep_feedback_learning_old/vizdoom/backprop_old.py | 1 | 8854 | #!/usr/bin/env python
from __future__ import print_function
from vizdoom import *
import sys
import threading
from time import sleep
from matplotlib import pyplot as plt
import numpy as np
import cv2
sys.path.append('../../deep_feedback_learning')
import deep_feedback_learning
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used in-code configuration will also work - most recent changes will add to previous ones.
# game.load_config("../../scenarios/basic.cfg")
# Sets path to additional resources wad file which is basically your scenario wad.
# If not specified default maps will be used and it's pretty much useless... unless you want to play good old Doom.
game.set_doom_scenario_path("./basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
width = 640
widthNet = 320
height = 480
heightNet = 240
# Sets the screen buffer format. Not used here but now you can change it. Default is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in game objects labeling.
game.set_labels_buffer_enabled(True)
# Enables buffer with top down map of the current episode/level.
game.set_automap_buffer_enabled(True)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_minimal_hud(False) # If hud is enabled
game.set_render_crosshair(True)
game.set_render_weapon(False)
game.set_render_decals(False)
game.set_render_particles(False)
game.set_render_effects_sprites(False)
game.set_render_messages(False)
game.set_render_corpses(False)
# Adds buttons that will be allowed.
# game.add_available_button(Button.MOVE_LEFT)
# game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 50)
game.add_available_button(Button.ATTACK)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 200 tics (actions)
game.set_episode_timeout(500)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the living reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Enables engine output to console.
#game.set_console_enabled(True)
nFiltersInput = 0
nFiltersHidden = 0
minT = 3
maxT = 30
nHidden0 = 4
net = deep_feedback_learning.DeepFeedbackLearning(307200, [nHidden0*nHidden0], 1, nFiltersInput, nFiltersHidden, minT,maxT)
#net.enableDebugOutput()
#net.getLayer(0).setConvolution(widthNet,heightNet)
#net.getLayer(1).setConvolution(nHidden0,nHidden0)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop)
net.setLearningRate(0.0001)
net.setMomentum(0.5)
net.initWeights(0.001,1,deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM)
net.setUseDerivative(0)
net.setBias(1)
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Run this many episodes
episodes = 100
# Sets time that will pause the engine after each action (in seconds)
# Without this everything would go too fast for you to keep track of what's happening.
sleep_time = 1.0 / DEFAULT_TICRATE # = 0.028
delta2 = 0
dontshoot = 1
deltaZeroCtr = 1
inp = np.zeros(widthNet*heightNet)
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
ln1 = False
ln2 = [False,False,False,False]
def buildFilters():
ksize = 35
sigma = 5.
gamma = 1.
theta_vals = np.linspace(0., np.pi, 8)
lambd_vals = (7, 13, 27)
sigma_vals = (3, 7, 15)
coeffs = ((theta, lambd, sigma) for lambd, sigma in zip(lambd_vals, sigma_vals) for theta in theta_vals)
filters = [(cv2.getGaborKernel((ksize,ksize), coeff[2], coeff[0], coeff[1], gamma)/(0.01*ksize*ksize*sigma), coeff[2])
for coeff in coeffs]
return filters
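# Illustrative helper (the name is ours, not part of the original script):
# a sketch of how the Gabor bank from buildFilters() could be applied to a
# frame, mirroring the commented-out block inside the episode loop below.
# The second element of each filter tuple is the sigma, reused here as a
# downscale factor for the filter response.
def applyFilters(frame, filters):
    responses = []
    for kernel, scale in filters:
        response = cv2.filter2D(frame, -1, kernel)
        response = cv2.resize(response, (int(width / scale), int(height / scale)))
        responses.append(response.flatten())
    return np.concatenate(responses)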
def getWeights2D(neuron):
n_neurons = net.getLayer(0).getNneurons()
n_inputs = net.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if net.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = net.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(heightNet,widthNet)
def getWeights1D(layer,neuron):
n_neurons = net.getLayer(layer).getNneurons()
n_inputs = net.getLayer(layer).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
weights[i] = net.getLayer(layer).getNeuron(neuron).getAvgWeight(i)
return weights
def plotWeights():
global ln1
global ln2
while True:
if ln1:
ln1.remove()
plt.figure(1)
w1 = getWeights2D(0)
for i in range(1,net.getLayer(0).getNneurons()):
w2 = getWeights2D(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln1 = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
for j in range(1,3):
if ln2[j]:
ln2[j].remove()
plt.figure(j+1)
w1 = np.zeros( (net.getLayer(j).getNneurons(),net.getLayer(j).getNeuron(0).getNinputs()) )
for i in range(0,net.getLayer(j).getNneurons()):
w1[i,:] = getWeights1D(j,i)
ln2[j] = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
#t1 = threading.Thread(target=plotWeights)
#t1.start()
spatialFilters = buildFilters()
for i in range(episodes):
print("Episode #" + str(i + 1))
# Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state
state = game.get_state()
# Which consists of:
n = state.number
vars = state.game_variables
screen_buf = state.screen_buffer
depth_buf = state.depth_buffer
labels_buf = state.labels_buffer
automap_buf = state.automap_buffer
labels = state.labels
midlinex = int(width/2);
midliney = int(height*0.75);
crcb = screen_buf
screen_left = screen_buf[100:midliney,0:midlinex-1,2]
screen_right = screen_buf[100:midliney,midlinex+1:(width-1),2]
screen_left = cv2.filter2D(screen_left, -1, sharpen);
screen_right = cv2.filter2D(screen_right, -1, sharpen);
# cv2.imwrite('/tmp/left.png',screen_left)
# cv2.imwrite('/tmp/right.png',screen_right)
lavg = np.average(screen_left)
ravg = np.average(screen_right)
delta = (lavg - ravg)*15
dd = delta - delta2
delta2 = delta
# print(delta)
# Makes a random action and get remember reward.
shoot = 0
if (dontshoot > 1) :
dontshoot = dontshoot - 1
else :
if (abs(dd) < 10) :
shoot = 1
dontshoot = 60
deltaZeroCtr = 4
if deltaZeroCtr>0:
deltaZeroCtr = deltaZeroCtr - 1
delta = 0
inputImage = np.zeros(0)
# for f in spatialFilters:
# gray1 = cv2.filter2D(crcb[:,:,2], -1, f[0])
# gray1 = cv2.resize(gray1, (int(width / f[1]), int(height / f[1])))
# inputImage = np.append(inputImage, gray1)
inputImage = crcb[:,:,2].flatten()
if (i>300):
delta = 0
err = np.zeros(1)
err[0] = delta
net.doStep(inputImage,err)
output = net.getOutput(0)*5000
print(delta,output)
action = [ delta+output , shoot ]
r = game.make_action(action)
# if sleep_time > 0:
# sleep(sleep_time)
if ((i % 10) == 0):
net.saveModel("modelBP.txt")
# Check how the episode went.
print("Episode finished.")
print("Total reward:", game.get_total_reward())
print("************************")
sleep(1)
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
| gpl-3.0 |
ljwolf/pysal_core | libpysal/weights/tests/test_Distance.py | 2 | 11190 |
from ...common import RTOL, ATOL, pandas
from ...cg.kdtree import KDTree
from ..util import get_points_array
from ... import cg
from ... import weights
from .. import Distance as d, Contiguity as c
from ...io import geotable as pdio
from ...io.FileIO import FileIO as psopen
import numpy as np
from ... import examples as pysal_examples
import unittest as ut
PANDAS_EXTINCT = pandas is None
# All instances should test these four methods, and define their own functional
# tests based on common codepaths/estimated weights use cases.
class Distance_Mixin(object):
polygon_path = pysal_examples.get_path('columbus.shp')
arc_path = pysal_examples.get_path('stl_hom.shp')
points = [(10, 10), (20, 10), (40, 10),
(15, 20), (30, 20), (30, 30)]
euclidean_kdt = KDTree(points, distance_metric='euclidean')
polygon_f = psopen(polygon_path) # our file handler
poly_centroids = get_points_array(polygon_f) # our iterable
polygon_f.seek(0) #go back to head of file
arc_f = psopen(arc_path)
arc_points = get_points_array(arc_f)
arc_f.seek(0)
arc_kdt = KDTree(arc_points, distance_metric='Arc',
radius=cg.sphere.RADIUS_EARTH_KM)
cls = object # class constructor
known_wi = None #index of known w entry to compare
known_w = dict() #actual w entry
known_name = known_wi
def setUp(self):
self.__dict__.update({k:v for k,v in Distance_Mixin.__dict__.items()
if not k.startswith('_')})
def test_init(self):
# test vanilla, named
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_shapefile(self):
# test vanilla, named, sparse
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_array(self):
# test named, sparse
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_dataframe(self):
# test named, columnar, defau
raise NotImplementedError('You need to implement this test '
'before this module will pass')
class Test_KNN(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.known_wi0 = 7
self.known_w0 = [3, 6, 12, 11]
self.known_wi1 = 0
self.known_w1 = [2, 1, 3 ,7]
self.known_wi2 = 4
self.known_w2 = [1, 3, 9, 12]
self.known_wi3 = 40
self.known_w3 = [31, 38, 45, 49]
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.KNN(self.euclidean_kdt, k=2)
self.assertEqual(w.neighbors[0], [1,3])
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
df = pdio.read_files(self.polygon_path)
w = d.KNN.from_dataframe(df, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
def test_from_array(self):
w = d.KNN.from_array(self.poly_centroids, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
def test_from_shapefile(self):
w = d.KNN.from_shapefile(self.polygon_path, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
##########################
# Function/User tests #
##########################
def test_reweight(self):
w = d.KNN(self.points, k=2)
new_point = [(21,21)]
wnew = w.reweight(k=4, p=1, new_data=new_point, inplace=False)
self.assertEqual(wnew[0], {1: 1.0, 3: 1.0, 4: 1.0, 6: 1.0})
class Test_DistanceBand(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.grid_path = pysal_examples.get_path('lattice10x10.shp')
self.grid_rook_w = c.Rook.from_shapefile(self.grid_path)
self.grid_f = psopen(self.grid_path)
self.grid_points = get_points_array(self.grid_f)
self.grid_f.seek(0)
self.grid_kdt = KDTree(self.grid_points)
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.DistanceBand(self.grid_kdt, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
def test_from_shapefile(self):
w = d.DistanceBand.from_shapefile(self.grid_path, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
def test_from_array(self):
w = d.DistanceBand.from_array(self.grid_points, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
import pandas as pd
geom_series = pdio.shp.shp2series(self.grid_path)
random_data = np.random.random(size=len(geom_series))
df = pd.DataFrame({'obs':random_data, 'geometry':geom_series})
w = d.DistanceBand.from_dataframe(df, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
##########################
# Function/User tests #
##########################
def test_integers(self):
"""
see issue #126
"""
grid_integers = [tuple(map(int, poly.vertices[0]))
for poly in self.grid_f]
self.grid_f.seek(0)
grid_dbw = d.DistanceBand(grid_integers, 1)
for k,v in grid_dbw:
self.assertEquals(v, self.grid_rook_w[k])
def test_arcdist(self):
arc = cg.sphere.arcdist
kdt = KDTree(self.arc_points, distance_metric='Arc',
radius=cg.sphere.RADIUS_EARTH_KM)
npoints = self.arc_points.shape[0]
full = np.matrix([[arc(self.arc_points[i], self.arc_points[j])
for j in xrange(npoints)]
for i in xrange(npoints)])
maxdist = full.max()
w = d.DistanceBand(kdt, maxdist, binary=False, alpha=1.0)
np.testing.assert_allclose(w.sparse.todense(), full)
def test_dense(self):
w_rook = c.Rook.from_shapefile(
pysal_examples.get_path('lattice10x10.shp'))
polys = psopen(pysal_examples.get_path('lattice10x10.shp'))
centroids = [p.centroid for p in polys]
w_db = d.DistanceBand(centroids, 1, build_sp=False)
for k in w_db.id_order:
np.testing.assert_equal(w_db[k], w_rook[k])
class Test_Kernel(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.known_wi0 = 0
self.known_w0 = {0: 1, 1: 0.500000049999995, 3: 0.4409830615267465}
self.known_wi1 = 0
self.known_w1 = {0: 1.0, 1: 0.33333333333333337,
3: 0.2546440075000701}
self.known_w1_bw = 15.
self.known_wi2 = 0
self.known_w2 = {0: 1.0, 1: 0.59999999999999998,
3: 0.55278640450004202, 4: 0.10557280900008403}
self.known_w2_bws = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]
self.known_wi3 = 0
self.known_w3 = [1.0, 0.10557289844279438, 9.9999990066379496e-08]
self.known_w3_abws =[[11.180341005532938], [11.180341005532938],
[20.000002000000002], [11.180341005532938],
[14.142137037944515], [18.027758180095585]]
self.known_wi4 = 0
self.known_w4 = {0: 0.3989422804014327,
1: 0.26741902915776961,
3: 0.24197074871621341}
self.known_w4_abws = self.known_w3_abws
self.known_wi5 = 1
self.known_w5 = {4: 0.0070787731484506233,
2: 0.2052478782400463,
3: 0.23051223027663237,
1: 1.0}
self.known_wi6 = 0
self.known_w6 = {0: 1.0, 2: 0.03178906767736345,
1: 9.9999990066379496e-08}
#stick answers & params here
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.Kernel(self.euclidean_kdt)
for k,v in w[self.known_wi0].items():
np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)
def test_from_shapefile(self):
w = d.Kernel.from_shapefile(self.polygon_path, idVariable='POLYID')
for k,v in w[self.known_wi5].items():
np.testing.assert_allclose((k,v), (k,self.known_w5[k]), rtol=RTOL)
w = d.Kernel.from_shapefile(self.polygon_path, fixed=False)
for k,v in w[self.known_wi6].items():
np.testing.assert_allclose((k,v), (k,self.known_w6[k]), rtol=RTOL)
def test_from_array(self):
w = d.Kernel.from_array(self.points)
for k,v in w[self.known_wi0].items():
np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
df = pdio.read_files(self.polygon_path)
w = d.Kernel.from_dataframe(df)
for k,v in w[self.known_wi5-1].items():
np.testing.assert_allclose(v, self.known_w5[k+1], rtol=RTOL)
##########################
# Function/User tests #
##########################
def test_fixed_bandwidth(self):
w = d.Kernel(self.points, bandwidth=15.0)
for k,v in w[self.known_wi1].items():
np.testing.assert_allclose((k,v), (k, self.known_w1[k]))
np.testing.assert_allclose(np.ones((w.n,1))*15, w.bandwidth)
w = d.Kernel(self.points, bandwidth=self.known_w2_bws)
for k,v in w[self.known_wi2].items():
np.testing.assert_allclose((k,v), (k, self.known_w2[k]), rtol=RTOL)
for i in range(w.n):
np.testing.assert_allclose(w.bandwidth[i], self.known_w2_bws[i], rtol=RTOL)
def test_adaptive_bandwidth(self):
w = d.Kernel(self.points, fixed=False)
np.testing.assert_allclose(sorted(w[self.known_wi3].values()),
sorted(self.known_w3), rtol=RTOL)
bws = w.bandwidth.tolist()
np.testing.assert_allclose(bws, self.known_w3_abws, rtol=RTOL)
w = d.Kernel(self.points, fixed=False, function='gaussian')
for k,v in w[self.known_wi4].items():
np.testing.assert_allclose((k,v), (k, self.known_w4[k]), rtol=RTOL)
bws = w.bandwidth.tolist()
np.testing.assert_allclose(bws, self.known_w4_abws, rtol=RTOL)
knn = ut.TestLoader().loadTestsFromTestCase(Test_KNN)
kern = ut.TestLoader().loadTestsFromTestCase(Test_Kernel)
db = ut.TestLoader().loadTestsFromTestCase(Test_DistanceBand)
suite = ut.TestSuite([knn, kern, db])
if __name__ == '__main__':
runner = ut.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
dwaithe/FCS_point_correlator | focuspoint/correlation_gui.py | 1 | 51790 | import struct
import numpy as np
#import scipy.weave as weave
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import sys, csv, os
from PyQt5 import QtGui, QtCore, QtWidgets
#import matplotlib
#matplotlib.use('Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.transforms import ScaledTranslation
import random
import errno
import os.path
from scipy.special import _ufuncs_cxx
import pickle
from correlation_objects import *
import tifffile as tif_fn
import json
"""FCS Bulk Correlation Software
Copyright (C) 2015 Dominic Waithe
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
class folderOutput(QtWidgets.QMainWindow):
def __init__(self,parent):
super(folderOutput, self).__init__()
self.initUI()
self.parent = parent
self.parent.config ={}
try:
self.parent.config = pickle.load(open(os.path.expanduser('~')+'/FCS_Analysis/config.p', "rb" ));
self.filepath = self.parent.config['output_corr_filepath']
except:
self.filepath = os.path.expanduser('~')+'/FCS_Analysis/output/'
try:
os.makedirs(self.filepath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def initUI(self):
self.textEdit = QtWidgets.QTextEdit()
self.setCentralWidget(self.textEdit)
self.statusBar()
openFile = QtWidgets.QAction(QtGui.QIcon('open.png'), 'Open', self)
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
self.setGeometry(300, 300, 350, 500)
self.setWindowTitle('Select a Folder')
#self.show()
def showDialog(self):
if self.type == 'output_corr_dir':
#folderSelect = QtGui.QFileDialog()
#folderSelect.setDirectory(self.filepath);
tfilepath = str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory",self.filepath))
if tfilepath !='':
self.filepath = tfilepath
#Save to the config file.
self.parent.config['output_corr_filepath'] = str(tfilepath)
pickle.dump(self.parent.config, open(str(os.path.expanduser('~')+'/FCS_Analysis/config.p'), "wb" ))
class Annotate():
def __init__(self,win_obj,par_obj,scrollBox):
self.ax = plt.gca()
self.x0 = []
self.par_obj = par_obj
self.win_obj = win_obj
self.scrollBox = scrollBox
self.pickerSelect = False;
self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
def on_press(self, event):
self.ax.figure.canvas.draw()
self.x0 = event.xdata
def on_release(self, event):
#self.rect.remove()
self.x1 = event.xdata
if(self.x0 <0): self.x0 =0
if(self.x1 <0): self.x1 =0
        if(self.x0 > self.x1): self.x0, self.x1 = self.x1, self.x0  # swap so x0 is always the left edge
self.scrollBox.rect.append(plt.axvspan(self.x0, self.x1, facecolor=self.par_obj.colors[self.scrollBox.rect.__len__() % len(self.par_obj.colors)], alpha=0.5,picker=True))
self.ax.figure.canvas.draw()
        # Saves regions to a series of arrays. Opted not to make a class for this. Not sure why :-)
self.scrollBox.x0.append(self.x0)
self.scrollBox.x1.append(self.x1)
self.scrollBox.color = self.par_obj.colors[self.scrollBox.rect.__len__()]
self.scrollBox.TGid.append(self.par_obj.TGnumOfRgn)
self.scrollBox.facecolor.append(self.par_obj.colors[self.par_obj.TGnumOfRgn])
self.par_obj.TGnumOfRgn = self.par_obj.TGnumOfRgn + 1
self.scrollBox.generateList()
#refreshTable()
def freshDraw(self):
self.scrollBox.rect =[]
for i in range(0,self.scrollBox.x0.__len__()):
self.scrollBox.rect.append(plt.axvspan(self.scrollBox.x0[i], self.scrollBox.x1[i], facecolor=self.par_obj.colors[i % len(self.par_obj.colors)], alpha=0.5,picker=True))
self.win_obj.canvas5.draw()
def redraw(self):
for i in range(0,self.scrollBox.rect.__len__()):
self.scrollBox.rect[i].remove()
self.scrollBox.rect =[]
for i in range(0,self.scrollBox.x0.__len__()):
self.scrollBox.rect.append(plt.axvspan(self.scrollBox.x0[i], self.scrollBox.x1[i], facecolor=self.par_obj.colors[i % len(self.par_obj.colors)], alpha=0.5,picker=True))
self.win_obj.canvas5.draw()
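# Note on the Annotate class above: each selected time-gate region is stored
# as parallel lists on the scroll box (x0, x1, TGid, facecolor), and
# freshDraw()/redraw() rebuild the matplotlib axvspan patches from those
# lists rather than mutating the existing patches.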
class baseList(QtWidgets.QLabel):
def __init__(self):
super(baseList, self).__init__()
self.listId=0
def mousePressEvent(self,ev):
print(self.listId)
class FileDialog(QtWidgets.QMainWindow):
def __init__(self, win_obj, par_obj, fit_obj):
super(FileDialog, self).__init__()
self.initUI()
self.par_obj = par_obj
self.fit_obj = fit_obj
self.win_obj = win_obj
def initUI(self):
self.textEdit = QtWidgets.QTextEdit()
self.setCentralWidget(self.textEdit)
self.statusBar()
openFile = QtWidgets.QAction(QtGui.QIcon('open.png'), 'Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
self.setGeometry(300, 300, 350, 500)
self.setWindowTitle('File dialog')
def count(self):
        print('works')
#self.show()
def showDialog(self):
#Intialise Dialog.
fileInt = QtWidgets.QFileDialog()
try:
#Try and read the default location for a file.
f = open(os.path.expanduser('~')+'/FCS_Analysis/configLoad', 'r')
self.loadpath =f.readline()
f.close()
except:
#If not default will do.
self.loadpath = os.path.expanduser('~')+'/FCS_Analysis/'
#Create loop which opens dialog box and allows selection of files.
self.win_obj.update_correlation_parameters()
        file_imports = fileInt.getOpenFileNames(self, 'Open a data file', self.loadpath, 'pt3 files (*.pt3);;ptU files (*.ptU);;asc files (*.asc);;spc files (*.spc);;All Files (*.*)')
bt = QtWidgets.QPushButton("cancel")
for c,filename in enumerate(file_imports[0]):
self.win_obj.image_status_text.setStyleSheet("QStatusBar{padding-left:8px;color:green;font-weight:regular;}")
self.win_obj.image_status_text.showMessage("Processing file "+str(c+1)+" of "+str(file_imports[0].__len__()))
self.fit_obj.app.processEvents()
pic = picoObject(filename,self.par_obj,self.fit_obj);
if pic.exit == True:
self.win_obj.image_status_text.setStyleSheet("QStatusBar{padding-left:8px;color:red;font-weight:bold;}")
self.win_obj.image_status_text.showMessage("Your data-file is not a supported format.")
self.fit_obj.app.processEvents()
return
self.loadpath = str(QtCore.QFileInfo(filename).absolutePath())
self.par_obj.numOfLoaded = self.par_obj.numOfLoaded+1
self.win_obj.label.generateList()
self.win_obj.TGScrollBoxObj.generateList()
self.win_obj.updateCombo()
self.win_obj.cbx.setCurrentIndex(self.par_obj.numOfLoaded-1)
self.win_obj.plot_PhotonCount()
self.win_obj.plotDataQueueFn()
self.win_obj.image_status_text.setStyleSheet("QStatusBar{padding-left:8px;color:green;font-weight:regular;}")
self.win_obj.image_status_text.showMessage("Processing finished")
try:
f = open(os.path.expanduser('~')+'/FCS_Analysis/configLoad', 'w')
f.write(self.loadpath)
f.close()
except:
print('nofile')
#Update listing:
#main.label.remakeList()
class Window(QtWidgets.QWidget):
def __init__(self, par_obj, fit_obj):
super(Window, self).__init__()
self.fit_obj = fit_obj
self.par_obj = par_obj
self.generateWindow()
def on_resize1(self,event):
self.figure1.subplots_adjust(wspace=0.73, hspace=0.24,top=0.96, bottom =0.14,left=0.09, right=0.98)
self.figure1.tight_layout()
def on_resize4(self,event):
self.figure4.tight_layout(pad=1.08)
def on_resize5(self,event):
self.figure5.tight_layout(pad=1.08)
def generateWindow(self):
# a figure instance to plot on
self.figure1 = plt.figure()
self.figure1.set_size_inches(5.0,5.4)
self.figure1.subplots_adjust(wspace=0.73, hspace=0.24,top=0.96, bottom =0.14,left=0.09, right=0.98)
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
#self.canvas1 = FigureCanvas(self.figure1)
self.canvas1 = FigureCanvas(self.figure1)
self.figure1.patch.set_facecolor('white')
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar1 = NavigationToolbar(self.canvas1, self)
#self.figure2 = plt.figure()
#self.canvas2 = FigureCanvas(self.figure2)
#self.toolbar2 = NavigationToolbar(self.canvas2, self)
#self.figure3 = plt.figure()
#self.canvas3 = FigureCanvas(self.figure3)
#self.toolbar3 = NavigationToolbar(self.canvas3, self)
self.figure4 = plt.figure(figsize=(5.4,2.1))
self.figure4.subplots_adjust(left=0.15,bottom=0.25)
self.canvas4 = FigureCanvas(self.figure4)
self.figure4.patch.set_facecolor('white')
#self.toolbar4 = NavigationToolbar(self.canvas4, self)
self.figure5 = plt.figure(figsize=(5.4,2.1))
self.figure5.subplots_adjust(left=0.15,bottom=0.25)
# this is the Navigation widget
self.canvas5 = FigureCanvas(self.figure5)
self.figure5.patch.set_facecolor('white')
#Tself.toolbar5 = NavigationToolbar(self.canvas5, self)
self.canvas1.mpl_connect('resize_event',self.on_resize1)
#self.canvas2.mpl_connect('resize_event',self.on_resize2)
#self.canvas3.mpl_connect('resize_event',self.on_resize3)
self.canvas4.mpl_connect('resize_event',self.on_resize4)
self.canvas5.mpl_connect('resize_event',self.on_resize5)
self.ex = FileDialog(self, self.par_obj, self.fit_obj)
self.folderOutput = folderOutput(self.par_obj)
self.folderOutput.type = 'output_corr_dir'
# Just some button connected to `plot` method
self.openFile = QtWidgets.QPushButton('Open File')
self.openFile.setFixedWidth(120)
self.openFile.clicked.connect(self.ex.showDialog)
self.replot_btn = QtWidgets.QPushButton('Replot Data')
self.replot_btn.clicked.connect(self.plotDataQueueFn)
self.replot_btn2 = QtWidgets.QPushButton('Replot Data')
self.replot_btn2.clicked.connect(self.plotDataQueueFn)
self.saveAll_btn = QtWidgets.QPushButton('Save all as corr. files (.csv)')
self.saveAll_btn.clicked.connect(self.saveDataQueue)
self.normPlot = QtWidgets.QCheckBox('Normalise')
self.normPlot.setChecked(False)
#self.figure.canvas.mpl_connect('button_press_event', self.on_press)
#self.figure.canvas.mpl_connect('button_release_event', self.on_release)
# set the layout
self.spacer = QtWidgets.QLabel()
main_layout = QtWidgets.QHBoxLayout()
self.globalText = QtWidgets.QLabel()
self.globalText.setText('Correlation Para:')
self.reprocess_btn = QtWidgets.QPushButton('reprocess data')
self.reprocess_btn.clicked.connect(self.reprocessDataFn)
self.reprocess_btn.setFixedWidth(120)
self.reprocess_btn2 = QtWidgets.QPushButton('reprocess data')
self.reprocess_btn2.clicked.connect(self.reprocessDataFn)
self.reprocess_btn2.setFixedWidth(120)
self.reprocess_btn3 = QtWidgets.QPushButton('reprocess data')
self.reprocess_btn3.clicked.connect(self.reprocessDataFn)
self.reprocess_btn3.setFixedWidth(120)
self.NsubText = QtWidgets.QLabel('Nsub:')
self.NsubText.resize(50,40)
self.NsubEdit =lineEditSp('6',self)
self.NsubEdit.setFixedWidth(60)
self.NsubEdit.type ='nsub'
self.NcascStartText = QtWidgets.QLabel('Ncasc Start:')
self.NcascStartEdit = lineEditSp('0',self)
self.NcascStartEdit.setFixedWidth(60)
self.NcascStartEdit.parentId = self
self.NcascStartEdit.type = 'ncasc'
self.NcascEndText = QtWidgets.QLabel('Ncasc End:')
self.NcascEndEdit = lineEditSp('25',self)
self.NcascEndEdit.setFixedWidth(60)
self.NcascEndEdit.type = 'ncascEnd'
self.NcascEndEdit.parentId = self
self.winIntText = QtWidgets.QLabel('Bin Size (CH):')
self.winIntEdit = lineEditSp('10',self)
self.winIntEdit.setMaxLength(5)
self.winIntEdit.setFixedWidth(40)
self.winIntEdit.type = 'winInt'
self.winIntEdit.parObj = self
self.folderSelect_btn = QtWidgets.QPushButton('Output Folder')
self.folderSelect_btn.clicked.connect(self.folderOutput.showDialog)
        #Adds an all option to the combobox.
#grid1.addWidget(self.folderSelect_btn,11,0)
#grid1.addWidget(self.spacer,12,0,20,0)
self.label =scrollBox(self,self.par_obj)
self.TGScrollBoxObj =TGscrollBox(self,self.par_obj)
#The table which shows the details of the time-gating.
self.modelTab = QtWidgets.QTableWidget(self)
self.modelTab.setRowCount(0)
self.modelTab.setColumnCount(7)
self.modelTab.setColumnWidth(0,20);
self.modelTab.setColumnWidth(1,40);
self.modelTab.setColumnWidth(2,20);
self.modelTab.setColumnWidth(3,40);
self.modelTab.setColumnWidth(4,85);
self.modelTab.setColumnWidth(5,70);
self.modelTab.setColumnWidth(6,20);
#self.modelTab.horizontalHeader().setStretchLastSection(True)
self.modelTab.resize(350,200)
self.modelTab.setMinimumSize(310,200)
self.modelTab.setMaximumSize(310,200)
self.modelTab.setHorizontalHeaderLabels(["","From:","","To:","Apply to:", "", "", "", ""])
#The table which shows the details of each correlated file.
self.modelTab2 = QtWidgets.QTableWidget(self)
self.modelTab2.setRowCount(0)
self.modelTab2.setColumnCount(5)
self.modelTab2.setColumnWidth(0,80);
self.modelTab2.setColumnWidth(1,140);
self.modelTab2.setColumnWidth(2,30);
self.modelTab2.setColumnWidth(3,150);
self.modelTab2.setColumnWidth(4,100);
self.modelTab2.setColumnWidth(5,100);
self.modelTab2.horizontalHeader().setStretchLastSection(True)
self.modelTab2.resize(800,400)
self.modelTab2.setHorizontalHeaderLabels(["","data name","plot","save","file name"])
tableAndBtns = QtWidgets.QVBoxLayout()
channelPlotBtns = QtWidgets.QHBoxLayout()
correlationBtns = QtWidgets.QHBoxLayout()
#self.label.setText('<HTML><H3>DATA file: </H3><P>'+str(6)+' Click here to load in this sample and what happens if I make it too long.</P></HTML>')
#self.label.listId = 6
self.fileDialog = QtWidgets.QFileDialog()
self.centre_panel = QtWidgets.QVBoxLayout()
self.right_panel = QtWidgets.QVBoxLayout()
#Adds the main graph components to the top panel
#LEFT PANEL
self.left_panel = QtWidgets.QVBoxLayout()
self.left_panel_top = QtWidgets.QHBoxLayout()
self.left_panel.addLayout(self.left_panel_top)
self.left_panel_top.addWidget(self.canvas4)
self.left_panel_top.addStretch()
#LEFT PANEL TOP
self.left_panel_top_btns= QtWidgets.QHBoxLayout()
self.plotText =QtWidgets.QLabel()
self.plotText.setText('Plot: ')
self.left_panel_top_btns.addWidget(self.plotText)
self.left_panel_second_row_btns = QtWidgets.QHBoxLayout()
self.photonCountText = QtWidgets.QLabel()
self.photonCountText.setText('Bin Duration (ms): ')
self.photonCountEdit = lineEditSp('25',self)
self.photonCountEdit.type ='int_bin'
self.photonCountEdit.setMaxLength(5)
self.photonCountEdit.setFixedWidth(40)
self.photonCountEdit.parObj = self
self.photonCountEdit.resize(40,50)
self.photonCountExport_label = QtWidgets.QLabel("Export Individual Timeseries as: ")
self.photonIntensityTraceExportCSV = QtWidgets.QPushButton('.csv')
self.photonIntensityTraceExportTIF = QtWidgets.QPushButton('.tiff')
self.left_panel_third_row_btns = QtWidgets.QHBoxLayout()
self.save_int_timeSeries_csv = QtWidgets.QPushButton('Export all Timeseries as .csv')
self.save_int_timeSeries_tif = QtWidgets.QPushButton('Export all Timeseries as .tiff')
self.left_panel_third_row_btns.addWidget(self.save_int_timeSeries_csv)
self.left_panel_third_row_btns.addWidget(self.save_int_timeSeries_tif)
self.left_panel_third_row_btns.addStretch()
self.save_int_timeSeries_csv.clicked.connect(self.save_all_PhotonBinFnCSV)
self.save_int_timeSeries_tif.clicked.connect(self.save_all_PhotonBinFnTIF)
self.cbx = comboBoxSp(self)
self.cbx.type ='PhotonCount'
self.updateCombo()
self.replot_photon_btn = QtWidgets.QPushButton('replot Photon Count')
self.replot_photon_btn.clicked.connect(self.plot_PhotonCount)
self.left_panel_top_btns.addWidget(self.cbx)
self.left_panel_top_btns.addWidget(self.replot_photon_btn)
self.plotText1 =QtWidgets.QLabel()
self.left_panel_top_btns.addWidget(self.plotText1)
self.plotText2 =QtWidgets.QLabel()
self.left_panel_top_btns.addWidget(self.plotText2)
self.left_panel_top_btns.addStretch()
self.left_panel_export_fns = QtWidgets.QGroupBox('Export Binned Intensities')
self.left_panel_second_row_btns.addWidget(self.photonCountText)
self.left_panel_second_row_btns.addWidget(self.photonCountEdit)
self.left_panel_second_row_btns.addWidget(self.photonCountExport_label)
self.left_panel_second_row_btns.addWidget(self.photonIntensityTraceExportCSV)
self.left_panel_second_row_btns.addWidget(self.photonIntensityTraceExportTIF)
self.left_panel_second_row_btns.addStretch()
self.photonIntensityTraceExportCSV.clicked.connect(self.reprocessPhotonBinFnCSV)
self.photonIntensityTraceExportTIF.clicked.connect(self.reprocessPhotonBinFnTIF)
self.left_panel.addLayout(self.left_panel_top_btns)
self.left_panel_vertical_export = QtWidgets.QVBoxLayout()
self.left_panel_export_fns.setLayout(self.left_panel_vertical_export)
self.left_panel_vertical_export.addLayout(self.left_panel_second_row_btns)
self.left_panel_vertical_export.addLayout(self.left_panel_third_row_btns)
self.left_panel.addWidget(self.left_panel_export_fns)
#LEFT PANEL centre
self.left_panel_centre = QtWidgets.QHBoxLayout()
#LEFT PANEL centre right
self.left_panel_centre_right = QtWidgets.QVBoxLayout()
self.left_panel.addLayout(self.left_panel_centre)
self.left_panel_centre.addWidget(self.modelTab)
self.left_panel_centre.addLayout(self.left_panel_centre_right)
self.left_panel_centre.addStretch()
#LEFT PANEL bottom
self.left_panel_bottom = QtWidgets.QVBoxLayout()
self.left_panel_bottom_fig = QtWidgets.QHBoxLayout()
self.left_panel_bottom.addLayout(self.left_panel_bottom_fig)
self.left_panel_bottom_fig.addWidget(self.canvas5)
self.left_panel_bottom_fig.addStretch()
self.left_panel.addLayout(self.left_panel_bottom)
#LEFT PANEL bottom buttons
self.left_panel_bottom_btns = QtWidgets.QHBoxLayout()
self.left_panel_bottom.addLayout(self.left_panel_bottom_btns)
self.left_panel_bottom_btns.addWidget(self.normPlot)
self.left_panel_bottom_btns.addWidget(self.winIntText)
self.left_panel_bottom_btns.addWidget(self.winIntEdit)
self.left_panel_bottom_btns.addWidget(self.reprocess_btn2)
#
self.left_panel_bottom_btns.addStretch()
self.left_panel_centre_right.setSpacing(2)
self.left_panel_centre_right.addWidget(self.openFile)
self.left_panel_centre_right.addWidget(self.globalText)
self.left_panel_centre_right.addWidget(self.NsubText)
self.left_panel_centre_right.addWidget(self.NsubEdit)
self.left_panel_centre_right.addWidget(self.NcascStartText)
self.left_panel_centre_right.addWidget(self.NcascStartEdit)
self.left_panel_centre_right.addWidget(self.NcascEndText)
self.left_panel_centre_right.addWidget(self.NcascEndEdit)
self.left_panel_centre_right.addWidget(self.reprocess_btn)
self.left_panel_bottom_bottom = QtWidgets.QHBoxLayout()
self.image_status_text = QtWidgets.QStatusBar()
self.left_panel_bottom_bottom.addWidget(self.image_status_text)
self.left_panel.addLayout(self.left_panel_bottom_bottom)
self.left_panel_centre_right.setAlignment(QtCore.Qt.AlignTop)
self.right_panel.addWidget(self.canvas1)
self.right_panel.addLayout(channelPlotBtns)
self.right_panel.addLayout(correlationBtns)
self.right_panel.addWidget(self.modelTab2)
self.right_panel.addStrut(800)
#self.updateCombo()
self.channel1_lbl = QtWidgets.QLabel('Visualization: Primary Channel')
self.channel1_sel = comboBoxSp(self)
self.channel1_sel.type = 'channel1_sel'
self.channel2_lbl = QtWidgets.QLabel('Secondary Channel')
self.channel2_sel = comboBoxSp(self)
self.channel2_sel.type = 'channel2_sel'
for i in range(0,8):
self.channel1_sel.addItem('CH'+str(i+1))
self.channel2_sel.addItem('CH'+str(i+1))
self.channel1_sel.setCurrentIndex(0)
self.channel2_sel.setCurrentIndex(1)
channelPlotBtns.addWidget(self.channel1_lbl)
channelPlotBtns.addWidget(self.channel1_sel)
channelPlotBtns.addWidget(self.channel2_lbl)
channelPlotBtns.addWidget(self.channel2_sel)
channelPlotBtns.addStretch()
correlationBtns.addWidget(self.replot_btn)
correlationBtns.addWidget(self.folderSelect_btn)
correlationBtns.addWidget(self.saveAll_btn)
correlationBtns.addWidget(self.toolbar1)
correlationBtns.setAlignment(QtCore.Qt.AlignLeft)
self.left_panel.addStretch()
tableAndBtns.addWidget(self.modelTab2)
self.setLayout(main_layout)
main_layout.addLayout(self.left_panel)
main_layout.addStretch()
#main_layout.addLayout(self.centre_panel)
main_layout.addLayout(self.right_panel)
#main_layout.addLayout(self.right_panel)
self.plt1= self.figure1.add_subplot(311)
self.plt2= self.figure1.add_subplot(312)
self.plt3= self.figure1.add_subplot(313)
self.plt4= self.figure4.add_subplot(111)
self.plt5= self.figure5.add_subplot(111)
self.plt1.format_coord = lambda x, y: ''
self.plt2.format_coord = lambda x, y: ''
self.plt3.format_coord = lambda x, y: ''
self.plt1.set_title('Correlation', fontsize=12)
self.plt1.set_ylabel('Auto-correlation CH0 (tau)', fontsize=8)
self.plt2.set_ylabel('Auto-correlation CH1 (tau)', fontsize=8)
self.plt3.set_ylabel('Cross-correlation CH01 (tau)', fontsize=8)
self.figure4.suptitle('Photon Count', fontsize=12)
self.figure5.suptitle('Photon Decay Curve', fontsize=12)
self.plt5.a = Annotate(self,self.par_obj,self.TGScrollBoxObj)
def update_correlation_parameters(self):
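        # Read the correlator settings from the UI fields into par_obj:
        # NcascStart/NcascEnd and Nsub are (presumably) the multiple-tau
        # correlator cascade range and sub-bins per cascade; winInt ('Bin Size
        # (CH)') and photonCountBin ('Bin Duration (ms)') set the binning of
        # the photon-decay curve and the intensity trace, respectively.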
self.par_obj.NcascStart = int(self.NcascStartEdit.text())
self.par_obj.NcascEnd = int(self.NcascEndEdit.text())
self.par_obj.Nsub = int(self.NsubEdit.text())
self.par_obj.winInt = float(self.winIntEdit.text())
self.par_obj.photonCountBin = float(self.photonCountEdit.text())
def plotDataQueueFn(self):
self.plt1.cla()
self.plt2.cla()
self.plt3.cla()
self.plt5.clear()
self.canvas1.draw()
self.canvas5.draw()
for x in range(0, self.par_obj.numOfLoaded):
if(self.label.objCheck[x].isChecked() == True):
self.plot(self.par_obj.objectRef[x])
for y in range(0, self.par_obj.subNum):
if(self.label.objCheck[y+x+1].isChecked() == True):
self.plot(self.par_obj.subObjectRef[y])
self.plt5.ax = plt.gca()
self.plt5.a.freshDraw()
def save_all_PhotonBinFnCSV(self):
"""Reprocess all images and export .csv images."""
for x in range(0, self.par_obj.numOfLoaded):
self.reprocessPhotonBinFn('CSV',x)
for y in range(0, self.par_obj.subNum):
self.reprocessPhotonBinFn('CSV',x+y+1)
def save_all_PhotonBinFnTIF(self):
"""Reprocess all images and export .tiff images."""
for x in range(0, self.par_obj.numOfLoaded):
self.reprocessPhotonBinFn('TIFF',x)
for y in range(0, self.par_obj.subNum):
self.reprocessPhotonBinFn('TIFF',x+y+1)
def reprocessPhotonBinFnCSV(self):
index = self.cbx.currentIndex()
self.reprocessPhotonBinFn('CSV',index)
def reprocessPhotonBinFnTIF(self):
index = self.cbx.currentIndex()
self.reprocessPhotonBinFn('TIFF',index)
def reprocessPhotonBinFn(self,type_ex,index):
#Time series of photon counts. For visualisation.
objId = None
if index < self.par_obj.numOfLoaded:
objId = self.par_obj.objectRef[index]
else:
objId = self.par_obj.subObjectRef[index-self.par_obj.numOfLoaded]
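        # Re-bin the macro (arrival) times into an intensity trace for each
        # detected channel; trueTimeArr appears to be in ns, so the /1000000
        # converts to ms to match the photonCountBin width set in the UI.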
for i in range(0,objId.numOfCH):
timeSeries, timeSeriesScale = delayTime2bin(np.array(objId.trueTimeArr)/1000000,np.array(objId.subChanArr),objId.ch_present[i],objId.photonCountBin)
objId.timeSeries.append(timeSeries)
objId.timeSeriesScale.append(timeSeriesScale)
if type_ex == 'CSV':
f = open(self.folderOutput.filepath+'/'+objId.name+'_intensity.csv', 'w')
f.write('version,'+str(3)+'\n')
f.write('numOfCH,'+str(objId.numOfCH)+'\n')
strt = "Time (ms)"
for i in range(0,objId.numOfCH):
strt += ",intensityCH"+str(i+1)
f.write(strt+'\n')
if objId == None:
return
for x in range(0,objId.timeSeriesScale[0].__len__()):
strt = ""
for i in range(0,objId.numOfCH):
strt += ","+str(objId.timeSeries[i][x])
f.write(str(objId.timeSeriesScale[0][x])+strt+'\n')
if type_ex == 'TIFF':
height = objId.timeSeries[0].__len__()
export_im =np.zeros((objId.numOfCH,1,1,height))
for i in range(0,objId.numOfCH):
export_im[i,0,0,:] = np.array(objId.timeSeries[i]).astype(np.float32)
metadata = dict(microscope='', dtype=export_im.dtype.str)
metadata = json.dumps(metadata)
tif_fn.imsave(self.folderOutput.filepath+'/'+objId.name+'_raw.tiff', export_im.astype(np.float32), shape=export_im.shape,imagej=True,description=metadata)
def save_raw_carpet_fn(self):
"""Saves the carpet raw data to an image file"""
def reprocessDataFn(self):
for i in range(0, self.par_obj.numOfLoaded):
self.par_obj.objectRef[i].processData()
for i in range(0, self.par_obj.subNum):
self.par_obj.subObjectRef[i].processData()
self.plotDataQueueFn();
self.updateCombo()
self.plot_PhotonCount()
def updateCombo(self):
"""Updates photon counting combox box"""
self.cbx.clear()
#Populates comboBox with datafiles to which to apply the time-gating.
for b in range(0,self.par_obj.numOfLoaded):
self.cbx.addItem("Data: "+str(b), b)
for i in range(0, self.par_obj.subNum):
self.cbx.addItem("subData: "+str(b+i+1), b+i+1)
def plot_PhotonCount(self):
"""Plots the photon counting"""
index = self.cbx.currentIndex();
if index < self.par_obj.numOfLoaded:
objId = self.par_obj.objectRef[index]
else:
objId = self.par_obj.subObjectRef[index-self.par_obj.numOfLoaded]
self.plt4.clear()
self.canvas4.draw()
chplt1 = self.channel1_sel.currentIndex()
chplt2 = self.channel2_sel.currentIndex()
if chplt1 < objId.timeSeriesScale.__len__():
self.plt4.bar(np.array(objId.timeSeriesScale[chplt1]),np.array(objId.timeSeries[chplt1]), float(objId.photonCountBin), color=objId.color,linewidth=0)
self.plt4.set_xlim(0,objId.timeSeriesScale[chplt1][-1])
self.plotText1.setText('CH'+str(self.channel1_sel.currentIndex()+1))
self.plotText1.setStyleSheet("color:"+objId.color+";")
else:
self.plotText1.setText('')
if objId.numOfCH >1 and chplt2 < objId.timeSeriesScale.__len__():
self.plt4.bar(np.array(objId.timeSeriesScale[chplt2]),-1*np.array(objId.timeSeries[chplt2]).astype(np.float32),float(objId.photonCountBin),color="grey",linewidth=0,edgecolor = None)
self.plotText2.setText('CH'+str(self.channel2_sel.currentIndex()+1))
self.plotText2.setStyleSheet("color:grey;")
else:
self.plotText2.setText('')
self.figure4.subplots_adjust(left=0.15,bottom=0.25)
self.plt4.set_xlabel('Time (ms)', fontsize=12)
self.plt4.set_ylabel('Photon counts', fontsize=12)
self.plt4.xaxis.grid(True,'minor')
self.plt4.xaxis.grid(True,'major')
self.plt4.yaxis.grid(True,'minor')
self.plt4.yaxis.grid(True,'major')
self.canvas4.draw()
def plot(self,objId):
        ''' Plot the correlation curves and photon-decay histogram for objId. '''
autotime = objId.autotime
idx1 = self.channel1_sel.currentIndex()
if objId.autoNorm.__len__() > idx1:
auto = objId.autoNorm[idx1][idx1]
self.plt1.plot(autotime,auto,objId.color)
corrText = 'Auto-correlation'
subDTimeMax = objId.subDTimeMax
subDTimeMin = objId.subDTimeMin
self.plt1.set_xscale('log')
self.plt1.set_xlim([0, np.max(autotime)])
self.plt1.set_xlabel('Tau (ms)', fontsize=12)
self.plt1.set_ylabel('Auto-correlation CH'+str(idx1+1)+'(tau)', fontsize=8)
self.plt1.xaxis.grid(True,'minor')
self.plt1.xaxis.grid(True,'major')
self.plt1.yaxis.grid(True,'minor')
self.plt1.yaxis.grid(True,'major')
if objId.numOfCH >1:
idx2 = self.channel2_sel.currentIndex()
if objId.autoNorm.__len__() > idx2:
auto2 = objId.autoNorm[idx2][idx2]
self.plt2.plot(autotime,auto2,objId.color)
if objId.autoNorm.__len__() > idx1:
if objId.autoNorm[idx1].__len__() > idx2:
cross = objId.autoNorm[idx1][idx2]
self.plt3.plot(autotime,cross,objId.color)
self.plt2.set_ylabel('Auto-correlation CH'+str(idx2+1)+' (tau)', fontsize=8)
self.plt2.xaxis.grid(True,'minor')
self.plt3.set_ylabel('Cross-correlation CH'+str(idx1+1)+str(idx2+1)+' (tau)', fontsize=8)
self.plt3.xaxis.grid(True,'minor')
self.plt2.set_xscale('log')
self.plt2.set_xlim([0, np.max(autotime)])
self.plt2.set_xlabel('Tau (ms)', fontsize=12)
self.plt2.xaxis.grid(True,'minor')
self.plt2.xaxis.grid(True,'major')
self.plt2.yaxis.grid(True,'minor')
self.plt2.yaxis.grid(True,'major')
self.plt3.set_xscale('log')
self.plt3.set_xlim([1e-6, np.max(autotime)])
self.plt3.set_xlabel('Tau (ms)', fontsize=12)
self.plt3.xaxis.grid(True,'minor')
self.plt3.xaxis.grid(True,'major')
self.plt3.yaxis.grid(True,'minor')
self.plt3.yaxis.grid(True,'major')
if objId.type == 'mainObject':
if self.normPlot.isChecked() == True:
axisText = 'No. of photons (Norm)'
else:
axisText = 'No. of photons '
if objId.autoNorm.__len__() > idx1:
decayScale1 = objId.decayScale[idx1]
if self.normPlot.isChecked() == True:
photonDecayCh1 = objId.photonDecayNorm[idx1]
else:
photonDecayCh1 = objId.photonDecay[idx1]
self.plt5.plot(decayScale1[1:-2], photonDecayCh1[1:-2],objId.color)
if objId.numOfCH >1 and objId.autoNorm.__len__() > idx2:
decayScale2 = objId.decayScale[idx2]
if self.normPlot.isChecked() == True:
photonDecayCh2 = objId.photonDecayNorm[idx2]
else:
photonDecayCh2 = objId.photonDecay[idx2]
self.plt5.plot(decayScale2[1:-2], photonDecayCh2[1:-2], objId.color,linestyle='dashed')
#
self.figure5.subplots_adjust(left=0.1,right=0.95, bottom=0.20,top=0.90)
if objId.resolution != None:
self.plt5.set_xlabel('Time channels (1 ='+str(np.round(objId.resolution,4))+' ns)', fontsize=12)
else:
                self.plt5.set_xlabel('Time channels (no micro time in file)', fontsize=12)
self.plt5.set_ylabel(axisText, fontsize=12)
self.plt5.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
self.plt5.xaxis.grid(True,'minor')
self.plt5.xaxis.grid(True,'major')
self.plt5.yaxis.grid(True,'minor')
self.plt5.yaxis.grid(True,'major')
self.canvas5.draw()
# refresh canvas
self.canvas1.draw()
def saveDataQueue(self):
for obj in self.par_obj.objectRef:
self.saveFile(obj)
for obj in self.par_obj.subObjectRef:
self.saveFile(obj)
def saveFile(self,objId):
"""Save files as .csv"""
f = open(self.folderOutput.filepath+'/'+objId.name+'_correlation.csv', 'w')
f.write('version,'+str(3)+'\n')
f.write('numOfCH,'+str(objId.numOfCH)+'\n')
f.write('type, point\n')
f.write('parent_name,'+objId.name+'\n')
strt = "ch_type"
for indx_arr in objId.indx_arr:
strt += ","+str(indx_arr[0]+1)+"_"+str(indx_arr[1]+1)
f.write(strt+'\n')
strt = "kcount"
for kcount in objId.kcount:
strt += ","+str(kcount)
f.write(strt+'\n')
strt = "numberNandB"
for numberNandB in objId.numberNandB:
strt += ","+str(numberNandB)
f.write(strt+'\n')
strt = "brightnessNandB"
for brightnessNandB in objId.brightnessNandB:
strt += ","+str(brightnessNandB)
f.write(strt+'\n')
strt = "CV"
for CV in objId.CV:
strt += ","+str(CV)
f.write(strt+'\n')
f.write('carpet pos, 0 \n')
f.write('pc, 0\n');
strt = "Time (ms)"
for i,j in objId.indx_arr:
if i ==j:
strt += ",CH"+str(i+1)+" Auto-Correlation"
else:
strt += ",CH"+str(i+1)+str(j+1)+" Cross-Correlation"
f.write(strt+'\n')
for x in range(0,objId.autotime.shape[0]):
strt = ""
for i,j in objId.indx_arr:
strt += ","+str(objId.autoNorm[i][j][x])
f.write(str(objId.autotime[x])+strt+'\n')
f.write('end\n')
class checkBoxSp(QtWidgets.QCheckBox):
def __init__(self):
QtWidgets.QCheckBox.__init__(self)
self.obj = []
self.type = []
self.name =[]
def updateChecked(self):
self.obj.plotOn = self.isChecked()
class checkBoxSp2(QtWidgets.QCheckBox):
def __init__(self, win_obj, par_obj):
        QtWidgets.QCheckBox.__init__(self)
        self.win_obj = win_obj
        self.par_obj = par_obj
self.obj = []
self.type = []
self.name =[]
self.stateChanged.connect(self.__changed)
def __changed(self,state):
if state == 2:
if self.obj.carpetDisplay == 0:
self.obj.CH0AutoFn()
if self.obj.carpetDisplay == 1:
self.obj.CH1AutoFn()
if self.obj.carpetDisplay == 2:
self.obj.CH01CrossFn()
if state == 0:
if self.obj.carpetDisplay == 3:
self.obj.CH0AutoFn()
if self.obj.carpetDisplay == 4:
self.obj.CH1AutoFn()
if self.obj.carpetDisplay == 5:
self.obj.CH01CrossFn()
#plotDataQueueFn()
class lineEditSp(QtWidgets.QLineEdit):
def __init__(self, txt, win_obj):
QtWidgets.QLineEdit.__init__(self, txt)
self.editingFinished.connect(self.__handleEditingFinished)
self.textChanged.connect(self.__handleTextChanged)
self.obj = []
self.type = []
self.TGid =[]
self.win_obj = win_obj
def __handleEditingFinished(self):
if(self.type == 'tgt0' ):
self.win_obj.TGScrollBoxObj.x0[self.TGid] = float(self.text())
self.win_obj.plt5.a.redraw()
#plotDataQueueFn()
if(self.type == 'tgt1' ):
self.win_obj.TGScrollBoxObj.x1[self.TGid] = float(self.text())
self.win_obj.plt5.a.redraw()
if(self.type == 'name' ):
self.obj.name = str(self.text())
if(self.type == 'ncasc' or self.type =='ncascEnd' or self.type =='nsub' or self.type =='int_bin'):
self.win_obj.update_correlation_parameters()
def __handleTextChanged(self):
if self.type == 'int_bin':
self.win_obj.update_correlation_parameters()
class comboBoxSp(QtWidgets.QComboBox):
def __init__(self,win_obj):
QtWidgets.QComboBox.__init__(self,parent=None)
self.activated[str].connect(self.__activated)
self.obj = []
self.TGid =[]
self.type = []
self.win_obj = win_obj
def __activated(self,selected):
if self.type == 'AUG':
if self.currentIndex() == 1:
self.obj.aug = 'PIE'
self.obj.PIE = self.currentIndex();
self.obj.processData()
self.win_obj.plotDataQueueFn()
if self.currentIndex() == 2:
self.obj.aug = 'PIE'
self.obj.PIE = self.currentIndex();
self.obj.processData()
self.win_obj.plotDataQueueFn()
if self.currentIndex() == 3:
self.obj.aug = 'rmAP'
self.obj.processData()
self.win_obj.plotDataQueueFn()
#if self.type == 'PhotonCount':
#if self.type == 'channel1_sel':
# self.win_obj.plot_PhotonCount(self.win_obj.cbx.currentIndex());
#if self.type == 'channel2_sel':
# self.win_obj.plot_PhotonCount(self.win_obj.cbx.currentIndex());
class pushButtonSp(QtWidgets.QPushButton):
def __init__(self, win_obj, par_obj):
QtWidgets.QPushButton.__init__(self)
self.clicked.connect(self.__activated)
self.par_obj = par_obj;
self.win_obj = win_obj;
#Which list is should look at.
self.objList = []
self.xmin = []
self.xmax =[]
self.TGid = []
def __activated(self):
if self.type =='photoCrr':
self.par_obj.clickedS1 = self.xmin
self.par_obj.clickedS2 = self.xmax
self.win_obj.bleachInt.create_main_frame()
if self.type =='remove':
self.par_obj.TGnumOfRgn -= 1
self.win_obj.TGScrollBoxObj.rect.pop(self.TGid)
self.win_obj.TGScrollBoxObj.x0.pop(self.TGid)
self.win_obj.TGScrollBoxObj.x1.pop(self.TGid)
self.win_obj.TGScrollBoxObj.facecolor.pop(self.TGid)
self.win_obj.modelTab.clear()
self.win_obj.TGScrollBoxObj.generateList()
self.win_obj.plotDataQueueFn()
if self.type =='create':
self.xmin = self.win_obj.TGScrollBoxObj.x0[self.TGid]
self.xmax = self.win_obj.TGScrollBoxObj.x1[self.TGid]
if (self.objList.currentIndex() == self.par_obj.objectRef.__len__()):
for i in range(0,self.par_obj.objectRef.__len__()):
picoSub = subPicoObject(self.par_obj.objectRef[i],self.xmin,self.xmax,self.TGid,self.par_obj)
self.win_obj.updateCombo()
self.par_obj.subNum = self.par_obj.subNum+1
else:
picoSub = subPicoObject(self.par_obj.objectRef[self.objList.currentIndex()],self.xmin,self.xmax,self.TGid,self.par_obj)
self.win_obj.updateCombo()
self.par_obj.subNum = self.par_obj.subNum+1
self.win_obj.label.generateList()
self.win_obj.plotDataQueueFn()
self.win_obj.updateCombo()
class pushButtonSp2(QtWidgets.QPushButton):
"""Save button"""
def __init__(self, txt, win_obj, par_obj):
QtWidgets.QPushButton.__init__(self,txt)
self.clicked.connect(self.__clicked)
self.win_obj = win_obj;
self.par_obj = par_obj;
self.corr_obj =[]
def __clicked(self):
self.win_obj.saveFile(self.corr_obj)
class scrollBox():
def __init__(self, win_obj, par_obj):
self.win_obj = win_obj
self.par_obj = par_obj
self.par_obj.numOfLoaded = 0
self.par_obj.subNum =0
self.generateList()
def generateList(self):
self.obj =[];
self.objCheck =[];
for i in range(0, self.par_obj.numOfLoaded):
self.win_obj.modelTab2.setRowCount(i+1)
#Represents each y
self._l=QtWidgets.QHBoxLayout()
self.obj.append(self._l)
#HTML text
a =baseList()
a.listId = i
a.setText('<HTML><p style="color:'+str(self.par_obj.colors[i % len(self.par_obj.colors)])+';margin-top:0">Data '+str(i)+': </p></HTML>')
self.win_obj.modelTab2.setCellWidget(i, 0, a)
#Line edit for each entry in the file list
lb = lineEditSp(self.win_obj, self.par_obj)
lb.type ='name'
lb.obj = self.par_obj.objectRef[i]
lb.setText(self.par_obj.objectRef[i].name);
self.win_obj.modelTab2.setCellWidget(i, 1, lb)
cb = checkBoxSp()
cb.setChecked(self.par_obj.objectRef[i].plotOn)
cb.obj = self.par_obj.objectRef[i]
cb.stateChanged.connect(cb.updateChecked)
self.win_obj.modelTab2.setCellWidget(i, 2, cb)
#cbx = comboBoxSp(self.win_obj)
#Populates comboBox with datafiles to which to apply the time-gating.
#cbx.addItem("norm", 0)
#cbx.addItem("PIE-CH-0", 1)
#cbx.addItem("PIE-CH-1", 2)
#cbx.addItem("rm AfterPulse", 3)
#cbx.obj = self.par_obj.objectRef[i]
#cbx.type = 'AUG'
            #Adds an all option to the combobox.
#cbx.TGid = i
#self.win_obj.modelTab2.setCellWidget(i, 3, cbx)
#Adds save button to the file.
sb = pushButtonSp2('save corr. file (.csv)', self.win_obj, self.par_obj)
sb.corr_obj = self.par_obj.objectRef[i]
self.win_obj.modelTab2.setCellWidget(i, 3, sb)
b = baseList()
b.setText('<HTML><p style="margin-top:0">'+str(self.par_obj.objectRef[i].ext)+' file :'+str(self.par_obj.data[i])+' </p></HTML>')
self.win_obj.modelTab2.setCellWidget(i, 4, b)
self.objCheck.append(cb)
j = i+1
for i in range(0,self.par_obj.subNum):
self.win_obj.modelTab2.setRowCount(j+i+1)
a =baseList()
a.listId = i
a.setText('<HTML><p style="color:'+str(self.par_obj.subObjectRef[i].color)+';margin-top:0">TG-'+str(self.par_obj.subObjectRef[i].TGid)+': Data:'+str(self.par_obj.subObjectRef[i].parentUnqID)+'-xmin:'+str(round(self.par_obj.subObjectRef[i].xmin,1))+'-xmax:'+str(round(self.par_obj.subObjectRef[i].xmax,1))+' </p></HTML>')
self.win_obj.modelTab2.setCellWidget(i+j, 0, a)
#Line edit for each entry in the file list
lb =lineEditSp(self.win_obj, self.par_obj)
lb.type ='name'
lb.obj = self.par_obj.subObjectRef[i]
lb.setText(self.par_obj.subObjectRef[i].name);
self.win_obj.modelTab2.setCellWidget(i+j, 1, lb)
#Main text for file menu.
#Adds the plot checkBox:
cb = checkBoxSp()
cb.setChecked(self.par_obj.subObjectRef[i].plotOn)
cb.obj = self.par_obj.subObjectRef[i]
cb.stateChanged.connect(cb.updateChecked)
self.win_obj.modelTab2.setCellWidget(i+j, 2, cb)
#Adds save button to the file.
sb = pushButtonSp2('save corr. file (.csv)', self.win_obj, self.par_obj)
sb.corr_obj = self.par_obj.subObjectRef[i]
self.win_obj.modelTab2.setCellWidget(i+j, 3, sb)
b = baseList()
b.setText('<HTML><p style="margin-top:0">'+str(self.par_obj.subObjectRef[i].ext)+' file :'+str(self.par_obj.subObjectRef[i].filepath)+' </p></HTML>')
self.win_obj.modelTab2.setCellWidget(i+j, 4, b)
#Adds the checkBox to a list.
self.objCheck.append(cb)
class TGscrollBox():
#Generates scroll box for time-gating data.
def __init__(self, win_obj, par_obj):
self.win_obj = win_obj
self.par_obj = par_obj
self.par_obj.TGnumOfRgn = 0
self.x0 =[]
self.x1 =[]
self.facecolor =[]
self.TGid = []
self.rect =[]
def generateList(self):
for i in range(0, self.par_obj.TGnumOfRgn):
self.win_obj.modelTab.setRowCount(i+1)
txt2 = QtWidgets.QLabel()
txt2.setText('<HTML><p style="color:'+str(self.par_obj.colors[i % len(self.par_obj.colors)])+';margin-top:0">tg1:</p></HTML>')
self.win_obj.modelTab.setCellWidget(i, 0, txt2)
lb1 = lineEditSp('', self.win_obj)
lb1.setMaxLength(5)
lb1.setFixedWidth(40)
lb1.setText(str(self.win_obj.TGScrollBoxObj.x0[i]))
lb1.type = 'tgt0'
lb1.TGid = i
self.win_obj.modelTab.setCellWidget(i, 1, lb1)
txt3 = QtWidgets.QLabel()
txt3.setText('<HTML><p style="color:'+str(self.par_obj.colors[i % len(self.par_obj.colors)])+';margin-top:0">tg2:</p></HTML>')
self.win_obj.modelTab.setCellWidget(i, 2, txt3)
lb2 = lineEditSp('', self.win_obj)
lb2.setMaxLength(5)
lb2.setFixedWidth(40)
lb2.setText(str(self.win_obj.TGScrollBoxObj.x1[i]))
lb2.type = 'tgt1'
lb2.TGid = i
self.win_obj.modelTab.setCellWidget(i, 3, lb2)
cbx = comboBoxSp(self.win_obj)
#Populates comboBox with datafiles to which to apply the time-gating.
for b in range(0,self.par_obj.numOfLoaded):
cbx.addItem("Data: "+str(b), b)
            #Adds an all option to the combobox.
cbx.addItem("All",b+1)
cbx.TGid = i
self.win_obj.modelTab.setCellWidget(i, 4, cbx)
cbtn = pushButtonSp(self.win_obj, self.par_obj)
cbtn.setText('Create')
cbtn.type ='create'
cbtn.TGid = i
cbtn.xmin = self.win_obj.TGScrollBoxObj.x0[i]
cbtn.xmax = self.win_obj.TGScrollBoxObj.x1[i]
self.win_obj.modelTab.setCellWidget(i, 5, cbtn)
#Make sure the btn knows which list it is connected to.
cbtn.objList = cbx
rmbtn = pushButtonSp(self.win_obj, self.par_obj)
rmbtn.setText('X')
rmbtn.TGid = i
rmbtn.type = 'remove'
self.win_obj.modelTab.setCellWidget(i, 6, rmbtn)
class ParameterClass():
def __init__(self):
#Where the data is stored.
self.data = []
self.objectRef =[]
self.subObjectRef =[]
self.colors = ['blue','green','red','cyan','magenta','yellow','black'] | gpl-2.0 |
cdegroc/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 7 | 7349 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhoer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD Style.
from time import time
import numpy as np
import pylab as pl
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
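# --- Illustrative aside (not part of the original example) ------------------
# The modeling step below treats presence-only records as one-class density
# estimation. As a minimal, self-contained sketch on synthetic 2-D "presence"
# points (the toy_* names are hypothetical), the same API looks like this:
toy_rng = np.random.RandomState(0)
toy_presence = toy_rng.randn(50, 2) * 0.3 + np.array([1.0, -0.5])
toy_clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
toy_clf.fit(toy_presence)
# higher decision_function values correspond to denser regions of the
# training (presence) points
toy_scores = toy_clf.decision_function(toy_rng.randn(5, 2))
# -----------------------------------------------------------------------------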
print __doc__
def create_species_bunch(species_name,
train, test,
coverages, xgrid, ygrid):
"""
create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
points = dict(test=test, train=train)
for label, pts in points.iteritems():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print ("Note: when more than two species are provided, only "
"the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print "_" * 80
print "Modeling distribution of species '%s'" % species.name
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print " - fit OneClassSVM ... ",
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print "done. "
# Plot map of South America
pl.subplot(1, 2, i + 1)
if basemap:
print " - plot coastlines using basemap"
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print " - plot coastlines from coverage"
pl.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
pl.xticks([])
pl.yticks([])
print " - predict species distribution"
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
pl.contourf(X, Y, Z, levels=levels, cmap=pl.cm.Reds)
pl.colorbar(format='%.2f')
# scatter training/testing points
pl.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
pl.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
pl.legend()
pl.title(species.name)
pl.axis('equal')
# Compute AUC w.r.t. background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
pl.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print "\n Area under the ROC curve : %f" % roc_auc
print "\ntime elapsed: %.2fs" % (time() - t0)
plot_species_distribution()
pl.show()
| bsd-3-clause |
gbrammer/unicorn | object_examples.py | 2 | 57686 | import os
import pyfits
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
USE_PLOT_GUI=False
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import threedhst
import threedhst.eazyPy as eazy
import threedhst.catIO as catIO
import unicorn
import unicorn.brown_dwarf
import re
root = None
left = 0.1
bottom = 0.13
dy2d = 0.67
aspect = 0.65
temp_color = (8/255.,47/255.,101/255.)
lrange = np.array([1.05e4,1.68e4])
spec_linewidth=2
pad_linewidth=2
import unicorn
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
USE_TEX = True
def fainter_examples():
"""
AEGIS-15-G141_00120 lines, z=2
AEGIS-14-G141_00426, z=2.3, continuum break
AEGIS-1-G141_00891, z=1.6, continuum break
AEGIS-28-G141_00684, H=23, continuum break
COSMOS-17-G141_00451, H=22.1, continuum break
COSMOS-18-G141_00996, H=22.8 continuum break
COSMOS-2-G141_00335, H=22.6, faint continuum + line in massive, dusty galaxy
COSMOS-25-G141_00280, H=22.8, faint continuum break + OIII line, again line looks like comes from elsewhere
COSMOS-25-G141_01354, H=22, nice continuum break
COSMOS-6-G141_00325, H=22.9, high eqw OIII + Hb, xxxx lines come from nearby high eqw object
COSMOS-6-G141_0330, High eqw, H=24.03 (this is the object contaminating the object above)
GOODS-S-23-G141_00780, H=22.6, contamination removal, continuum break, OIII + OII
MARSHALL-225-G141_00356, H=22.9, morphology mess, IR excess, OIII
x = ['AEGIS-15-G141_00120', 'AEGIS-14-G141_00426', 'AEGIS-1-G141_00891','AEGIS-28-G141_00684', 'COSMOS-17-G141_00451','COSMOS-18-G141_00996']
"""
import unicorn.object_examples
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='MARSHALL-225-G141_00356', show_SED=True, sync=False, y0=14, y1=None, SED_voffset=0.40, SED_hoffset=0.05, plot_min=0.0, plot_max=9.5, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=-9, remove_contamination=True, vscale=0.1, vthumb=(-0.1,0.01), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='GOODS-S-23-G141_00780', show_SED=True, sync=False, y0=13, y1=70, SED_voffset=0.07, SED_hoffset=0.05, plot_min=-0.1, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=-2, remove_contamination=True, vscale=0.2, vthumb=(-0.2,0.02), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.08e4,1.68e4])
unicorn.object_examples.general_plot(object='COSMOS-6-G141_00330', show_SED=False, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=4, yticks=[0,1,2,3,4], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.5, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-18, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-25-G141_01354', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=13, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-25-G141_00280', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.12, SED_hoffset=0.05, plot_min=0, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-2-G141_00335', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=30, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-18-G141_00996', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00684', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.8e4])
unicorn.object_examples.general_plot(object='AEGIS-15-G141_00120', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=3, plot_max=12, yticks=[4,6,8,10,12], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.8e4])
unicorn.object_examples.general_plot(object='AEGIS-1-G141_00891', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=8, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-14-G141_00426', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=-0.5, plot_max=5.8, yticks=[0,2,4], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-17-G141_00451', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.4, vthumb=(-0.8,0.08), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00684', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.07, SED_hoffset=0.05, plot_min=-0.5, plot_max=6.2, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.5,0.05), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
ids = ['AEGIS-15-G141_00120', 'AEGIS-14-G141_00426', 'AEGIS-1-G141_00891','AEGIS-28-G141_00684', 'COSMOS-17-G141_00451','COSMOS-18-G141_00996']
for id in ids:
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object=id, show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
xx = """
Line emitters:
AEGIS-12-G141_00566, H=23.05
AEGIS-12-G141_00702, H=23.29
AEGIS-28-G141_00159
"""
ids = ['AEGIS-12-G141_00566','AEGIS-12-G141_00702','AEGIS-28-G141_00159','AEGIS-4-G141_00202','COSMOS-11-G141_00650','COSMOS-13-G141_01167','COSMOS-15-G141_00275','COSMOS-15-G141_00284','COSMOS-18-G141_00556','COSMOS-23-G141_00521','COSMOS-4-G141_00596','COSMOS-9-G141_01078','GOODS-S-27-G141_00387','PRIMO-1026-G141_00196','AEGIS-4-G141_00432','PRIMO-1026-G141_00491','PRIMO-1101-G141_00280']
for id in ids:
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object=id, show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
#
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-4-G141_00202', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-4-G141_00432', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=18, yticks=[0,5,10,15], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-12-G141_00566', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=11, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-12-G141_00702', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00159', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-4-G141_00596', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=16, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-9-G141_01078', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-11-G141_00650', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=12, yticks=[0,2,4,6,8,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-13-G141_01167', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-15-G141_00275', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-18-G141_00556', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-2, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-23-G141_00521', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='GOODS-S-27-G141_00387', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='PRIMO-1026-G141_00196', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=0, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='PRIMO-1026-G141_00491', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=0, plot_max=16, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
def run_all():
import unicorn.object_examples
unicorn.object_examples.agn_group()
unicorn.object_examples.z4_quasar()
unicorn.object_examples.big_dead_galaxy()
unicorn.object_examples.high_signal_to_noise_galaxy()
unicorn.object_examples.l_dwarf()
unicorn.object_examples.t_dwarf()
def agn_group():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
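# The conversion above takes a catalog flux with AB zeropoint `abzp`, forms
# f_nu = 10**(-0.4*(abzp+48.6)) erg/s/cm^2/Hz per catalog unit, converts to
# f_lambda with the usual c/lambda**2 factor (c = 3e18 Angstrom/s), and
# rescales to the 10**-18 erg/s/cm^2/A units used on the plot axes.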
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### AGN group
####
###################################################
# GOODS-N-36-G141_00991 / 1005
# for object in ['GOODS-N-36-G141_00991','GOODS-N-36-G141_01005']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/GOODS-N-36-G141_00991_thumb.fits.gz')
twod = pyfits.open('DATA/GOODS-N-36-G141_00991_2d.fits.gz')
spec2d = twod[1].data
y0, y1 = 24, 79
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.2, vmax=0.025, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect*plot_aspect/pix_aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0+5:y1+5], vmin=-0.8, vmax=0.1, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-36-G141_00991', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
## Secondary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-36-G141_01005', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
#ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='orange', linewidth=1, alpha=0.7)
####
zspec = 1.773
mag = phot.mag_f1392w[phot.id == 'GOODS-N-36-G141_00991'][0]
ax.text(0.05,0.8,r'$a)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
lines = [4102, 4341, 4862, 4980*1.08]
y0 = [0.7, 0.7, 1, 1.5]
labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','[OIII]4959+5007']
for i in range(len(lines)):
ax.text(lines[i]*(1+zspec), 3*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.1)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
print 'Savefig'
print os.getcwd()
fig.savefig('agn_group.pdf')
######## Brown dwarf
### AEGIS-3-G141_00195 T-type
### GOODS-N-24-G141_01148 L-type
####### Massive galaxies
### z=2.0, huge, old: COSMOS-26-G141_00725
### z=1.9, zspec, beautiful fit UDF: PRIMO-1101-G141_01022
def z4_quasar():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### z=4.6 quasar
####
###################################################
# for object in ['COSMOS-28-G141_00896']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/COSMOS-28-G141_00896_thumb.fits.gz')
twod = pyfits.open('DATA/COSMOS-28-G141_00896_2d.fits.gz')
spec2d = twod[1].data
y0, y1 = 10,32
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.2, vmax=0.025, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0-2:y1-2, y0-2:y1-2], vmin=-2.4, vmax=0.3, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='COSMOS-28-G141_00896', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert*1.15
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 4.656
mag = phot.mag_f1392w[phot.id == 'COSMOS-28-G141_00896'][0]
ax.text(0.05,0.8,r'$b)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
lines = [2799, 2326, 2439.]
y0 = [1.5, 0.7, 0.7, 0.7]
labels = ['Mg II', 'C II', 'Ne IV']
for i in range(len(lines)):
ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.1)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2])
print 'Savefig'
print os.getcwd()
fig.savefig('z4_quasar.pdf')
def big_dead_galaxy():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
# for object in ['COSMOS-26-G141_00725']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/COSMOS-26-G141_00725_thumb.fits.gz')
twod = pyfits.open('DATA/COSMOS-26-G141_00725_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 24, 60
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0+1:y1+1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*1.2, vmax=0.0125*1.2, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-2.4, vmax=0.3, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='COSMOS-26-G141_00725', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-18*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 2.0832
mag = phot.mag_f1392w[phot.id == 'COSMOS-26-G141_00725'][0]
ax.text(0.05,0.8,r'$c)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.2)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2,3])
#### Inset full sed
ax = fig.add_axes((left+0.55, bottom+0.1, 0.99-left-0.6, dy2d*0.4))
ax.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=2)
ax.plot(lambdaz,temp_sed, color='red', linewidth=1, alpha=0.3)
ax.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
ax.semilogx()
ax.set_xlim(3000,9.e4)
ax.set_ylim(-0.1*ymax,ymax*1.2)
ax.set_yticklabels([])
ax.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = ax.set_xticks([1.e4,5.e4]); ytick = ax.set_yticks([0,1,2,3])
#print os.getcwd()
fig.savefig('big_dead_galaxy.pdf')
def high_signal_to_noise_galaxy():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### High S/N galaxy at z=1.9 (PRIMO-1101-G141_01022)
####
###################################################
#
# for object in ['PRIMO-1101-G141_01022']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/PRIMO-1101-G141_01022_thumb.fits.gz')
twod = pyfits.open('DATA/PRIMO-1101-G141_01022_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 31, 56
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='PRIMO-1101-G141_01022', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-19*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-19*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'PRIMO-1101-G141_01022'][0]
#mass = mcat.lmass
ax.text(0.05,0.8,r'$d)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-19}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2,3,4,5])
#### Inset full sed
ax = fig.add_axes((left+0.55, bottom+0.1, 0.99-left-0.6, dy2d*0.4))
ax.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=2)
ax.plot(lambdaz,temp_sed, color='red', linewidth=1, alpha=0.3)
ax.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
ax.semilogx()
ax.set_xlim(3000,9.e4)
ax.set_ylim(-0.1*ymax,ymax*1.1)
ax.set_yticklabels([])
ax.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = ax.set_xticks([1.e4,5.e4]); ytick = ax.set_yticks([0,1,2,3,4,5])
print 'Savefig'
#print os.getcwd()
fig.savefig('high_signal_to_noise_galaxy.pdf')
#
def l_dwarf():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### L dwarf
####
###################################################
#
# for object in ['GOODS-N-24-G141_01148']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/GOODS-N-24-G141_01148_thumb.fits.gz')
twod = pyfits.open('DATA/GOODS-N-24-G141_01148_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 10, 30
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-24-G141_01148', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-19*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-19*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
bd = unicorn.brown_dwarf.BD_fit()
type = ['L4']
ii = 0
colors = ['green','blue','red','orange']
for temp in bd.templates:
if temp.type[0:2] in type:
if temp.type == 'L3+/-1':
continue
print temp.type
yint = np.interp(lci[is_spec], temp.wave, temp.flux)
norm = np.sum(fobs[is_spec]*yint)/np.sum(yint**2)
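# `norm` is the closed-form least-squares scale factor that best matches the
# brown-dwarf template to the observed spectrum: norm = sum(f*t) / sum(t*t).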
ax.plot(temp.wave, temp.flux*norm, color='white', linewidth=2, alpha=0.4)
ax.plot(temp.wave, temp.flux*norm, color=colors[ii % 4], linewidth=2, alpha=0.7)
ax.text(0.9-ii*0.08, 0.83, temp.type, color=colors[ii % 4], transform=ax.transAxes)
ii = ii + 1
#ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
#ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.5)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'GOODS-N-24-G141_01148'][0]
ax.text(0.05,0.8,r'$f)\ m_{140}=%.1f$' %(mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-19}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,5,10,15])
print 'Savefig'
#print os.getcwd()
fig.savefig('l_dwarf.pdf')
def t_dwarf():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### T dwarf
####
###################################################
#
# for object in ['AEGIS-3-G141_00195']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/AEGIS-3-G141_00195_thumb.fits.gz')
twod = pyfits.open('DATA/AEGIS-3-G141_00195_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 10, 30
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0-1:y1-1, y0-1:y1-1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='AEGIS-3-G141_00195', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-18*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
bd = unicorn.brown_dwarf.BD_fit()
type = ['T6','T5']
ii = 0
colors = ['green','blue','red','orange']
for temp in bd.templates:
if temp.type[0:2] in type:
if temp.type == 'L3+/-1':
continue
print temp.type
yint = np.interp(lci[is_spec], temp.wave, temp.flux)
norm = np.sum(fobs[is_spec]*yint)/np.sum(yint**2)
ax.plot(temp.wave, temp.flux*norm, color='white', linewidth=2, alpha=0.4)
ax.plot(temp.wave, temp.flux*norm, color=colors[ii % 4], linewidth=2, alpha=0.7)
ax.text(0.9-ii*0.05, 0.83, temp.type, color=colors[ii % 4], transform=ax.transAxes, fontsize=11)
ii = ii + 1
#ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
#ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.5)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'AEGIS-3-G141_00195'][0]
ax.text(0.05,0.8,r'$e)\ m_{140}=%.1f$' %(mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2])
print 'Savefig'
#print os.getcwd()
fig.savefig('t_dwarf.pdf')
######## Brown dwarf
### AEGIS-3-G141_00195 T-type
### GOODS-N-24-G141_01148 L-type
####### Massive galaxies
### z=2.0, huge, old: COSMOS-26-G141_00725
### z=1.9, zspec, beautiful fit UDF: PRIMO-1101-G141_01022
def get_tdwarf_mag():
"""
Get the broad / medium-band H magnitudes of the T dwarf
to compare to m140
"""
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
object = 'AEGIS-3-G141_00195'
ra = phot.x_world[phot.id == object][0]
dec = phot.y_world[phot.id == object][0]
m140 = phot.mag_f1392w[phot.id == object][0]
nmbs_cat, nmbs_zout, nmbs_fout = unicorn.analysis.read_catalogs(root=object)
dr = np.sqrt((nmbs_cat.ra-ra)**2*np.cos(dec/360*2*np.pi)**2+(nmbs_cat.dec-dec)**2)*3600.
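# `dr` is the angular separation (arcsec) between the grism object and each
# NMBS catalog source, with the cos(dec) correction applied to the RA offset.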
h1mag = 25-2.5*np.log10((nmbs_cat.H1*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
h2mag = 25-2.5*np.log10((nmbs_cat.H2*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
hmag = 25-2.5*np.log10(((nmbs_cat.H1+nmbs_cat.H2)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
jmag = 25-2.5*np.log10(((nmbs_cat.J2+nmbs_cat.J3)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
jmag = 25-2.5*np.log10(((nmbs_cat.J3)/1.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
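# Magnitudes are formed with the catalog AB zeropoint of 25; the Ktot/K ratio
# is presumably the aperture-to-total flux correction. Note that the J2+J3
# average computed just above is overwritten here by the J3-only magnitude.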
wirds = catIO.Readfile('/Users/gbrammer/research/drg/PHOTZ/EAZY/WIRDS/WIRDS_D3-95_Ks_ugrizJHKs_141927+524056_T0002.cat.candels')
dr = np.sqrt((wirds.ra-ra)**2*np.cos(dec/360.*2*np.pi)**2+(wirds.dec-dec)**2)*3600.
jwirds = wirds.jtot[dr == dr.min()][0]
hwirds = wirds.htot[dr == dr.min()][0]
print ' J H J-H H1 H2'
print 'NMBS %5.2f %5.2f %5.2f %5.2f %5.2f' %(jmag, hmag, jmag-hmag, h1mag, h2mag)
print 'WIRDS %5.2f %5.2f %5.2f' %(jwirds, hwirds, jwirds-hwirds)
#### Vrba et al. (2004)
#absH = np.array([14.52,14.78,15.07])
#d =
def misc_objects():
unicorn.object_examples.general_plot('UDF-Full-G141_00624', flam_norm=-19, vscale=0.1, vthumb=(-0.08*0.3,0.01*0.3), SED_voffset=0.42, SED_hoffset=0.05, remove_contamination=False)
def general_plot(object='AEGIS-9-G141_00154', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.1, SED_hoffset=0, plot_min=None, plot_max=None, yticks=None, fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=1, vthumb=(-1,0.1), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-18, scale_to_f140_mag=True, show_line_stats=False, line_stats_pos=(0.05, 0.05)):
import unicorn.catalogs
lines = unicorn.catalogs.lines
import unicorn.object_examples
dy2d = unicorn.object_examples.dy2d
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
if not os.path.exists('DATA/%s.zout' %(object)):
sync=True
if sync:
os.system('rsync -avz --progress $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
os.system('rsync -avz --progress $UNICORN:/3DHST/Spectra/Work/ANALYSIS/%s/OUTPUT/%s* DATA/' %(fit_path, object))
zout_file = catIO.Readfile('DATA/%s.zout' %(object))
thumb = pyfits.open('DATA/%s_thumb.fits.gz' %(object))
twod = pyfits.open('DATA/%s_2d.fits.gz' %(object))
spec2d = twod[1].data
if remove_contamination:
spec2d -= twod[4].data
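# Extension 4 of the 2D FITS file is treated as the contamination model, so
# subtracting it leaves an (approximately) contamination-free 2D spectrum.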
#y0, y1 = 24, 60
if y0 is None:
y0 = 0
if y1 is None:
y1 = spec2d.shape[0]
print 'NY: %d' %(spec2d.shape[0])
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12, use_tex=USE_TEX)
#### Twod
if show_2D:
ax2D = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax2D.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax2D.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*1.2*vscale, vmax=0.0125*1.2*vscale, interpolation='nearest')
ax2D.set_yticklabels([]); ax2D.set_xticklabels([])
xtick = ax2D.set_xticks(tick_int); ytick = ax2D.set_yticks([0,y1-y0])
#### Thumb
if show_Thumb:
axThumb = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
if dx_thumb is None:
dx_thumb = dy_thumb
axThumb.imshow(0-thumb[0].data[y0+dy_thumb:y1+dy_thumb, y0+dx_thumb:y1+dx_thumb], vmin=vthumb[0], vmax=vthumb[1], interpolation='nearest', zorder=2, aspect='auto')
axThumb.set_yticklabels([])
axThumb.set_xticklabels([])
xtick = axThumb.set_xticks([0,y1-y0]); ytick = axThumb.set_yticks([0,y1-y0])
else:
axThumb=None
else:
ax2D = None
axThumb=None
dy2d = 0.99-bottom
#### Spectrum
axSpec = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(fit_version, MAIN_OUTPUT_FILE=object, OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same', scale_flambda=False)
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
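# `is_spec` flags points whose wavelength spacing matches the grism dispersion
# to within 5%, separating the contiguous spectrum bins from the broadband
# photometry points in the EAZY output.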
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**flam_norm*(lci/5500.)**2
#obs_convert = 10**-17/10**flam_norm # now comes out of getEazySED in units of 10**-17 flam
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
#### Try integrating the spectrum and comparing to mag
fnu = fobs*lci**2/3.e18*10**(flam_norm)
xfilt, yfilt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
yint = np.interp(lci[is_spec], xfilt, yfilt)
m140_int = -2.5*np.log10(np.trapz(yint*fnu[is_spec],lci[is_spec])/np.trapz(yint,lci[is_spec]))-48.6
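# `m140_int` is a synthetic F140W AB magnitude obtained by integrating f_nu
# against the filter throughput read from $iref/F140W.dat; it is compared to
# the catalog m_140 below and, if requested, used to rescale the spectrum.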
try:
mag = phot.mag_f1392w[phot.id == object][0]
except:
mag = -1
#
print m140_int, mag
if (mag > 0) & scale_to_f140_mag:
scale_to_f140 = 10**(-0.4*(mag-m140_int))
fobs, efobs, obs_sed = fobs*scale_to_f140, efobs*scale_to_f140, obs_sed*scale_to_f140
temp_sed = temp_sed * scale_to_f140
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**flam_norm*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
axSpec.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
if show_Fit:
axSpec.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
axSpec.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 2.0832
#zspec = zout.z_peak[0::3][zout.id[0::3] == object]
zspec = zout_file.z_peak[fit_version]
if USE_TEX:
object_str = object.replace('_','\_')
else:
object_str = object
axSpec.text(0.05,0.9, object_str, transform=axSpec.transAxes, fontsize=9, backgroundcolor='white')
if mag > 0:
axSpec.text(0.05,0.8,r'$ \ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=axSpec.transAxes, fontsize=11, color='white', backgroundcolor='white', alpha=0.2)
axSpec.text(0.05,0.8,r'$ \ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=axSpec.transAxes, fontsize=11)
else:
axSpec.text(0.05,0.8,r'$z=%.3f$' %(zspec), transform=axSpec.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# axSpec.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
if plot_min is None:
plot_min = -0.1*ymax
if plot_max is None:
plot_max = 1.2*ymax
axSpec.set_ylim(plot_min,plot_max)
axSpec.set_xlim(lrange[0], lrange[1])
axSpec.set_xlabel(r'$\lambda\ [\mathrm{\AA}]$')
axSpec.set_ylabel(r'$f_\lambda\ [10^{%0d}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$' %(flam_norm))
if yticks is not None:
ytick = axSpec.set_yticks(yticks)
#### Inset full sed
if show_SED:
axInset = fig.add_axes((left+0.55+SED_hoffset, bottom+SED_voffset, 0.99-left-0.6, dy2d*0.4))
axInset.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=1)
axInset.plot(lambdaz, temp_sed, color='red', linewidth=1, alpha=0.3)
axInset.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.5, color='white')
axInset.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
axInset.semilogx()
axInset.set_xlim(3000,9.e4)
axInset.set_ylim(-0.1*ymax,ymax*1.2)
axInset.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = axInset.set_xticks([1.e4,5.e4])
if yticks is not None:
axInset.set_yticklabels([])
ytick = axInset.set_yticks(yticks)
else:
axInset = None
#print os.getcwd()
#
mat = lines.id == object
print '%s %.4f %.1f %.1f %.1e %.1f' %(object, lines.z_grism[mat][0], lines.oiii_eqw[mat][0], lines.oiii_eqw_err[mat][0], lines.oiii_flux[mat][0], lines.hbeta_eqw[mat][0])
if show_line_stats:
if (lines.z_grism[mat][0] < 1.5) & (lines.halpha_eqw_err[mat][0] > 0):
axSpec.text(line_stats_pos[0], line_stats_pos[1], r'${\rm EW}_{\rm H\alpha}=%d\pm%d,\ f_{\rm H\alpha}=%.1f\pm%.1f$' %(lines.halpha_eqw[mat][0], lines.halpha_eqw_err[mat][0], lines.halpha_flux[mat][0]/1.e-17, lines.halpha_eqw_err[mat][0]/lines.halpha_eqw[mat][0]*lines.halpha_flux[mat][0]/1.e-17), horizontalalignment='left', transform=axSpec.transAxes, backgroundcolor='white', fontsize=9)
#
if (lines.z_grism[mat][0] > 1.19) & (lines.z_grism[mat][0] < 2.3) & (lines.oiii_eqw_err[mat][0] > 0):
axSpec.text(line_stats_pos[0]+0.45, line_stats_pos[1], r'${\rm EW}_{\rm OIII}=%d\pm%d,\ f_{\rm OIII}=%.1f\pm%.1f$' %(lines.oiii_eqw[mat][0], lines.oiii_eqw_err[mat][0], lines.oiii_flux[mat][0]/1.e-17, lines.oiii_eqw_err[mat][0]/lines.oiii_eqw[mat][0]*lines.oiii_flux[mat][0]/1.e-17), horizontalalignment='left', transform=axSpec.transAxes, backgroundcolor='white', fontsize=9)
unicorn.catalogs.savefig(fig, object+'_display.pdf')
return fig, ax2D, axThumb, axSpec, axInset
| mit |
guschmue/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 31 | 16390 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
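# Pushes the parsed Example tensors through a FIFOQueue (driven by a
# QueueRunner) and returns the dequeued tensors keyed by feature name, so the
# output of parse_example can be served repeatedly to an Estimator input_fn.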
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
supriyantomaftuh/syzygy | third_party/numpy/files/numpy/core/function_base.py | 82 | 5474 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
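# Set the endpoint explicitly: the arange-based computation can miss `stop`
# by a few ulps due to floating-point rounding of the accumulated step.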
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start, stop, num=50, endpoint=True, base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
| apache-2.0 |
DonBeo/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 26 | 13430 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB; once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
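# Illustrative usage sketch (assumes a working scikit-learn install and
# network access for the first download; not exercised by this module):
#
#   >>> from sklearn.datasets import fetch_20newsgroups
#   >>> train = fetch_20newsgroups(subset='train',
#   ...                            remove=('headers', 'footers', 'quotes'))
#   >>> len(train.data) == len(train.target)
#   True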
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warn("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warn("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
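# Illustrative example (hypothetical message text):
#
#   >>> strip_newsgroup_header("From: [email protected]\nSubject: test\n\nHello world")
#   'Hello world'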
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
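# Illustrative example (hypothetical message text): the attribution line and
# the quoted line are dropped, the reply is kept.
#
#   >>> strip_newsgroup_quoting("someone writes:\n> quoted text\nactual reply")
#   'actual reply'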
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
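# Illustrative example (hypothetical message text): the block after the
# hyphen line is treated as a signature and removed.
#
#   >>> strip_newsgroup_footer("Some reply text\n-- \nJane Doe\nExample Corp")
#   'Some reply text'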
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
data.description = 'the 20 newsgroups by date dataset'
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
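# Illustrative sketch of the "custom vectorizer" route mentioned in the
# docstring above (assumes scikit-learn is installed; the TfidfVectorizer
# settings are arbitrary examples, not recommendations):
#
#   >>> from sklearn.feature_extraction.text import TfidfVectorizer
#   >>> bunch = fetch_20newsgroups(subset='train', remove=('headers',))
#   >>> X = TfidfVectorizer(stop_words='english').fit_transform(bunch.data)
#   >>> X.shape[0] == len(bunch.target)
#   True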
| bsd-3-clause |
rohanp/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 56 | 2400 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
spacelis/anatool | anatool/experiment/predcate.py | 1 | 10575 | #!python
# -*- coding: utf-8 -*-
"""File: timeweb.py
Description:
History:
0.1.0 The first version.
"""
__version__ = '0.1.0'
__author__ = 'SpaceLis'
from matplotlib import pyplot as plt
from anatool.analysis.timemodel import TimeModel
from anatool.analysis.textmodel import LanguageModel
from anatool.analysis.ranking import ranke, linearjoin, randranke
from anatool.analysis.evaluation import batcheval, wilcoxontest, placetotalrank
from anatool.dm.dataset import Dataset, loadrows, place_name
from anatool.dm.db import GEOTWEET
import seaborn as sns
sns.set_palette("deep", desat=.6)
sns.set_style("white")
sns.set_context(font_scale=1.5, rc={"figure.figsize": (3, 2), 'axes.grid': False, 'axes.linewidth': 1,})
def cmptimeweb(cities, numtwts, numtest):
""" compare the time model + web model to original pure text model
"""
lmranks = [list() for _ in numtwts]
tmranks = [list() for _ in numtwts]
wmranks = list()
randranks = list()
lmtmranks = [list() for _ in numtwts]
wmlmranks = [list() for _ in numtwts]
wmlmtmranks = [list() for _ in numtwts]
test = Dataset()
for city in cities:
lms = [dict() for _ in numtwts]
tms = [dict() for _ in numtwts]
wms = dict()
tst = Dataset()
for pid in city:
twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
('place_id=\'{0}\''.format(pid),), 'sample_switch_place_cate',
'order by rand() limit {0}'.format(max(numtwts) + numtest))
for i, n in enumerate(numtwts):
lms[i][pid] = LanguageModel(twtp['text'][:n])
tms[i][pid] = TimeModel(twtp['created_at'][:n])
web = loadrows(GEOTWEET, ('place_id', 'web'),
('place_id=\'{0}\''.format(pid),), 'web',
'order by rand() limit 30')
try:
wms[pid] = LanguageModel(web['web'])
except KeyError:
wms[pid] = LanguageModel('')
# Prepare test data by the tail part of the data retrieved from db
test_pos = max(numtwts)
for i in range(test_pos, test_pos + numtest):
tst.append({'label': pid,
'lm': LanguageModel([twtp['text'][i],]),
'tm': TimeModel([twtp['created_at'][i],])})
test.extend(tst)
# rank
for item in tst:
for i, _ in enumerate(numtwts):
lmranks[i].append(ranke(lms[i], item['lm']))
tmranks[i].append(ranke(tms[i], item['tm']))
wmranks.append(ranke(wms, item['lm']))
randranks.append(randranke(city))
for i in range(len(numtwts)):
for ranklm, ranktm in zip(lmranks[i], tmranks[i]):
lmtmranks[i].append(linearjoin([ranklm, ranktm], [0.5, 0.5]))
for ranklm, rankwm in zip(lmranks[i], wmranks):
wmlmranks[i].append(linearjoin([ranklm, rankwm], [0.5, 0.5]))
for ranklm, ranktm, rankwm in zip(lmranks[i], tmranks[i], wmranks):
wmlmtmranks[i].append(\
linearjoin([ranklm, ranktm, rankwm], [0.33, 0.33, 0.33]))
# plot
candls = ['-', '--', '-.']
# mks = ['o', '^', '*']
#for i in range(len(numtwts)):
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet(s={0})'.format(numtwts[i]),
#ls=candls[i%2], marker=mks[i/2])
#for i in range(len(numtwts)):
#for plc in placetotalrank(lmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(lmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(lmranks[i], lmranks[i-1], test)
#plt.legend(loc='lower right')
#---------------------------------------------------------------
for i in range(len(numtwts)):
lmeval = batcheval(lmranks[i], test['label'])
plt.plot(lmeval['pos'], lmeval['rate'],
label='tweet(s={0})'.format(numtwts[i]),
ls=candls[i], marker='o')
# wmlmeval = batcheval(wmlmranks[i], test['label'])
# plt.plot(wmlmeval['pos'], wmlmeval['rate'],
# label='tweet(s={0})+web'.format(numtwts[i]),
# ls=candls[i], marker='^')
# print 'Wilcoxon (lm vs wmlm):', wilcoxontest(lmranks[i], wmlmranks[i], test)
# print 'Place id -> name:'
# for plc in placetotalrank(wmlmranks[i], test)['label'][-10:]:
# print place_name(plc), plc
# print 'Place Total Rank:'
# print placetotalrank(wmlmranks[i], test)['totalrank'][-10:]
plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
ls='-.', marker='s',
label='Random Baseline')
# wmeval = batcheval(wmranks, test['label'])
# print 'Place id -> name:'
# for plc in placetotalrank(wmranks, test)['label'][-10:]:
# print place_name(plc), plc
# print 'Place Total Rank'
# print placetotalrank(wmranks, test)['totalrank'][-10:]
# plt.plot(wmeval['pos'], wmeval['rate'],
# label='web',
# ls=':')
#---------------------------------------------------------------
#for i in range(len(numtwts)):
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet',
#ls=candls[i], marker='o')
#lmtmeval = batcheval(lmtmranks[i], test['label'])
#plt.plot(lmtmeval['pos'], lmtmeval['rate'],
#label='tweet+time',
#ls=candls[i], marker='^')
#wmlmtmeval = batcheval(wmlmtmranks[i], test['label'])
#plt.plot(wmlmtmeval['pos'], wmlmtmeval['rate'],
#label='tweet+time+web',
#ls=candls[i], marker='*')
#plt.legend(loc='lower right')
#plt.ylabel('Rate containing Reference POI')
#plt.xlabel('Top $p$ places')
#plt.show()
#---------------------------------------------------------------
#i=0
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#tmeval = batcheval(tmranks[i], test['label'])
#plt.plot(tmeval['pos'], tmeval['rate'],
#label='time',
#ls=candls[i], marker='o')
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet',
#ls=candls[i], marker='^')
#lmtmeval = batcheval(lmtmranks[i], test['label'])
#plt.plot(lmtmeval['pos'], lmtmeval['rate'],
#label='tweet+time',
#ls=candls[i], marker='*')
#for plc in placetotalrank(tmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(tmranks[i], test)['totalrank'][-10:]
#for plc in placetotalrank(lmtmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(lmtmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(lmranks[i], lmtmranks[i], test)
#plt.legend(loc='lower right')
#plt.ylabel('Rate containing Reference POI')
#plt.xlabel('Top $p$ places')
#i=1
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#tmeval = batcheval(tmranks[i], test['label'])
#plt.plot(tmeval['pos'], tmeval['rate'],
#label='time',
#ls=candls[i], marker='o')
#wmlmeval = batcheval(wmlmranks[i], test['label'])
#plt.plot(wmlmeval['pos'], wmlmeval['rate'],
#label='tweet + web',
#ls=candls[i], marker='^')
#wmlmtmeval = batcheval(wmlmtmranks[i], test['label'])
#plt.plot(wmlmtmeval['pos'], wmlmtmeval['rate'],
#label='tweet+time+web',
#ls=candls[i], marker='*')
#for plc in placetotalrank(wmlmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(wmlmranks[i], test)['totalrank'][-10:]
#for plc in placetotalrank(wmlmtmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(wmlmtmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(wmlmranks[i], wmlmtmranks[i], test)
#plt.legend(loc='lower right')
#plt.ylabel('Rate containing Reference POI')
#plt.xlabel('Top $p$ places')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
def richrank(cities, names):
candls = ['-', '--', '.-']
mks = ['o', '^', '*', 'v', 's']
for idx in range(len(cities)):
lms = dict()
test = Dataset()
for pid in cities[idx]:
twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
('place_id=\'{0}\''.format(pid),), 'sample_switch_place_cate',
'order by rand() limit 110')
lms[pid] = LanguageModel(twtp['text'][:100])
for cnt in range(100, 110):
test.append({'label': twtp['place_id'][cnt],
'lm': LanguageModel([twtp['text'][cnt],])})
lmranks = list()
for twtlm in test:
lmranks.append(ranke(lms, twtlm['lm']))
lmeval = batcheval(lmranks, test['label'])
plt.plot(lmeval['pos'], lmeval['rate'], ls=candls[idx%2], marker=mks[idx/2],
label='{0}($s=100$)'.format(names[idx]))
plt.legend(loc='lower right')
plt.ylabel('Rate containing referece POI')
plt.xlabel('Top $p$ places')
plt.show()
def cntdist():
"""docstring for cntdist
"""
with open('cntdist.csv') as fin:
cnts = [int(cnt.strip()) for cnt in fin]
plt.loglog(range(len(cnts)), cnts)
plt.xlabel('POIs ordered by # of tweets')
plt.ylabel('# of tweets')
plt.show()
def run():
""" Test this module
"""
cities = list()
for city in ['ch10_cate.lst', 'la10_cate.lst', 'ny10_cate.lst', 'sf10_cate.lst']:
with open('data/' + city) as fin:
cities.append([p.strip() for p in fin])
#cmptimeweb(cities, [100, 25, 10, 5], 10)
cmptimeweb(cities, [1000, 100, 5], 10)
#richrank(cities, ['Chicago', 'Los Angeles', 'New York', 'San Francisco'])
if __name__ == '__main__':
run()
| mit |
dingocuster/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
barentsen/dave | lpp/newlpp/lppTransform.py | 1 | 8388 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 20:32:12 2018
Functions to correctly fold and bin a light curve.
Calculate the lpp metric: transform to lower dimensions, knn
Depends on class from reading in a previously created LPP metric Map
Depends on reading in the light curve to data structure.
input is a class called data
data contains
data.time (days)
data.tzero (day)
data.dur (hours)
data.period (days)
data.flux (normalized to 0)
After foldBinLightCurve it contains
data.binned
After transform it contains
data.lpp_transform
@author: smullally
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
def computeLPPTransitMetric(data,mapInfo):
"""
This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
It then returns a lpp metric value.
"""
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
#plt.figure()
#plt.plot(binPhase,binFlux,'.--')
#Dimensionality Reduction and knn parts
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
#Normalize by Period Dependence
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
"""
Take a running median of size dt
Return values at times given in runt
"""
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
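# Illustrative example: the median of all points within +/- dt of each
# requested time in runt.
#
#   >>> t = np.array([0., 1., 2., 3., 4.])
#   >>> y = np.array([0., 10., 20., 30., 40.])
#   >>> runningMedian(t, y, 1.0, [1., 3.])
#   ([1.0, 3.0], [10.0, 30.0])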
def foldBinLightCurve (data, ntrfr, npts):
"""
Fold and bin light curve for input to LPP metric calculation
data contains time, tzero, dur, priod,mes and flux (centered around zero)
ntrfr -- number of transit fraction for binning around transit ~1.5
npts -- number of points in the final binning.
"""
#Create phase light curve
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
#Determine the fraction of the time the planet transits the star.
    #Insist that ntrfr * transit fraction does not exceed 0.5 (capped below)
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
#Specify the out of transit (a) and the in transit regions
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
#bstep=(ovsamp*ntrfr*transit_fr)/npts
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
#print "length a: %u " % len(a)
#print "length b: %u" % len(b)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
#Combine the two sets of bins
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
#Scale the flux by the depth so everything has the same depth.
    #Catch for division by zero: if the depth is zero, do not scale.
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
def computeRawLPPTransitMetric(binFlux,mapInfo):
"""
Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number.
"""
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
    #To match the Matlab LPP implementation, we need to remove the mean of the transform.
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
"""
    For a group of known transits and a new one,
    use knn to determine how close the new one is to the known transits
    using a Minkowski distance (p=2, i.e. Euclidean, as set below).
    Uses sklearn.neighbors.NearestNeighbors to do this.
"""
    #p sets the order of the Minkowski distance; p=2 (Euclidean) is used here. TODO: check whether the Matlab version used p=3.
nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
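# Illustrative example: with p=2 this is a plain Euclidean nearest-neighbour
# lookup against the known transits.
#
#   >>> known = np.array([[0., 0.], [1., 1.], [2., 2.]])
#   >>> new = np.array([[0.1, 0.1]])
#   >>> dist, ind = knnDistance_fromKnown(known, new, 1)
#   >>> int(ind[0][0]), round(float(dist[0][0]), 3)
#   (0, 0.141)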
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
"""
Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample
"""
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
#Find the those with the nearest periods Npsample-nneighbors
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
#logNew=np.array([np.log10(newPeriod)]).reshape(1,1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
#Find the nthPercentile of the rawLpp of these indicies
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
"""
Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit.
"""
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
    if len(newtime) > nExpCad*0.75:
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
def lpp_averageIndivTransit(tcedata,mapInfo):
"""
Create the loop over individual transits and return
array normalized lpp values, mean and std.
Input TCE object and mapInfo object.
It is unclear that this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals.
"""
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
| mit |
idc9/law-net | vertex_metrics_experiment/code/pipeline_helper_functions.py | 1 | 2134 | import glob
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
def load_snapshots(subnet_dir):
"""
Loads the snapshot data frames into a dict indexed by the file names
Parameters
----------
subnet_dir: path to experiment data files
Output
------
python dict
"""
path_to_vertex_metrics_folder = subnet_dir + 'snapshots/'
snapshot_paths = glob.glob(path_to_vertex_metrics_folder + \
"/vertex_metrics*.csv")
snapshots_dict = {}
for path in snapshot_paths:
# snapshot file name is key
snapshot_key = path.split('snapshots/')[1].split('.csv')[0]
snapshots_dict[snapshot_key] = pd.read_csv(path, index_col=0)
return snapshots_dict
def get_snapshot_year(ing_year, active_years):
"""
Returns the smallest year greater than ing year
"""
return min([y for y in active_years if ing_year <= y])
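# Illustrative example: the first snapshot year at or after the ingestion year.
#
#   >>> get_snapshot_year(1985, [1980, 1990, 2000])
#   1990
#   >>> get_snapshot_year(1990, [1980, 1990, 2000])
#   1990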
def edge_is_present(G, source, target):
"""
Returns true of there is an edge from source to target
Parameters
source, target: igraph vertex indices
G: directed igraph object
"""
return G.get_eid(v1=source, v2=target, directed=True, error=False) != -1
def standardize(X, center=False, scale=False):
"""
Standardizes a vector
Parameters
---------
cetner: to center or not to center (by mean)
scale: to scale or not to scale (by standard deviation)
"""
mu = 0
sigma = 1
if center:
mu = np.mean(X)
if scale:
sigma = np.std(X)
return (X - mu)/sigma
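# Illustrative example: centering only, leaving the scale at 1.
#
#   >>> standardize(np.array([1., 2., 3.]), center=True)
#   array([-1.,  0.,  1.])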
def save_sparse_csr(filename, array):
"""
saves a sparse CSR matrix
from http://stackoverflow.com/questions/8955448/save-load-scipy-sparse-csr-matrix-in-portable-data-format
"""
np.savez(filename, data=array.data, indices=array.indices,
indptr=array.indptr, shape=array.shape)
def load_sparse_csr(filename):
"""
Loads a saved CSR matrix
"""
loader = np.load(filename)
return csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
| mit |
kbrannan/PyHSPF | src/pyhspf/forecasting/extract_timeseries.py | 2 | 7323 | #!/usr/bin/env python
#
# extract_NRCM.py
# David J. Lampert
#
# extracts the grid point for a watershed from the preprocessed NRCM data
import os, shutil, pickle, datetime, numpy
from multiprocessing import Pool, cpu_count
from matplotlib import pyplot, path, patches
from shapefile import Reader
def inside_box(p1, p2, p3, space = 0):
"""Checks if p3 is inside a box formed by p1 and p2."""
if p1[0] < p3[0] and p3[0] < p2[0] or p1[0] > p3[0] and p3[0] > p2[0]:
# x value is inside
if p1[1] < p3[1] and p3[1] < p2[1] or p1[1] > p3[1] and p3[1] > p2[1]:
# y value is inside
return True
else: return False
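# Illustrative example: both coordinates must fall strictly between p1 and p2.
#
#   >>> inside_box([0, 0], [2, 2], [1, 1])
#   True
#   >>> inside_box([0, 0], [2, 2], [1, 3])
#   False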
def get_boundaries(shapes, space = 0.1):
"""Gets the boundaries for the plot."""
boundaries = shapes[0].bbox
for shape in shapes[0:]:
b = shape.bbox
if b[0] < boundaries[0]: boundaries[0] = b[0]
if b[1] < boundaries[1]: boundaries[1] = b[1]
if b[2] > boundaries[2]: boundaries[2] = b[2]
if b[3] > boundaries[3]: boundaries[3] = b[3]
xmin = boundaries[0] - (boundaries[2] - boundaries[0]) * space
ymin = boundaries[1] - (boundaries[3] - boundaries[1]) * space
xmax = boundaries[2] + (boundaries[2] - boundaries[0]) * space
ymax = boundaries[3] + (boundaries[3] - boundaries[1]) * space
return xmin, ymin, xmax, ymax
def make_patch(points, facecolor, edgecolor = 'Black', width = 1, alpha = None,
hatch = None, label = None):
"""Uses a list or array of points to generate a matplotlib patch."""
vertices = [(point[0], point[1]) for point in points]
vertices.append((points[0][0], points[0][1]))
codes = [path.Path.LINETO for i in range(len(points) + 1)]
codes[0] = path.Path.MOVETO
patch = patches.PathPatch(path.Path(vertices, codes), facecolor = facecolor,
edgecolor = edgecolor, lw = width, hatch = hatch,
alpha = alpha, label = label)
return patch
def plot_NRCM(lons, lats, bfile = None, sfile = None, space = 0.05,
show = False, output = None):
fig = pyplot.figure()
sub = fig.add_subplot(111, aspect = 'equal')
sub.set_title('Nested Regional Climate Model Grid Points')
sub.scatter(lons, lats, marker = '+', c = 'r', s = 40)
if bfile is not None:
sf = Reader(bfile)
boundary = sf.shape(0).points
sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 1.2))
if sfile is not None:
sf = Reader(sfile)
for s in sf.shapes():
boundary = s.points
sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 0.2))
sub.set_xlabel('Longitude, Decimal Degrees', size = 13)
sub.set_ylabel('Latitude, Decimal Degrees', size = 13)
xmin, ymin, xmax, ymax = get_boundaries(sf.shapes(), space = space)
pyplot.xlim([xmin, xmax])
pyplot.ylim([ymin, ymax])
if output is not None: pyplot.savefig(output)
if show: pyplot.show()
pyplot.clf()
pyplot.close()
def extract_raw(source, destination, HUC8, plot = True, save = True,
verbose = True):
"""Extracts the grid data for the HUC8."""
# make a new directory for the HUC8
d = '{}/{}/NRCM'.format(destination, HUC8)
if not os.path.isdir(d): os.mkdir(d)
# make a "raw directory" for the unaltered info
raw = '{}/raw'.format(d)
if not os.path.isdir(raw):
os.mkdir(raw)
if verbose: print('extracting NRCM predictions...\n')
# use the boundary file to find the bounding box for the grid points
boundaryfile = '{0}/{1}/{1}boundaries'.format(destination, HUC8)
subbasinfile = '{0}/{1}/{1}subbasins'.format(destination, HUC8)
space = 0.1
sf = Reader(boundaryfile)
bbox = get_boundaries(sf.shapes(), space = space)
xmin, ymin, xmax, ymax = bbox
if verbose and not os.path.isdir(raw):
print('bounding box =', xmin, ymin, xmax, ymax, '\n')
lats, lons = [], []
for f in os.listdir(source):
i = f.index('_')
lon = float(f[:i])
lat = float(f[i+1:])
if inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
lats.append(lat)
lons.append(lon)
if not os.path.isfile('{}/{}'.format(raw, f)):
shutil.copy('{}/{}'.format(source, f), '{}/{}'.format(raw, f))
if plot:
if save: output = '{}/gridpoints'.format(d)
else: output = None
if not os.path.isfile(output):
plot_NRCM(lons, lats, bfile = boundaryfile, sfile = subbasinfile,
output = output, show = False)
def extract_timeseries(directory, start, end,
series = ['rain', 'temperature', 'wind', 'humidity',
'solar', 'snowdepth', 'evaporation']):
source = '{}/raw'.format(directory)
gridfiles = [os.path.join(source, f) for f in os.listdir(source)]
for ts in series:
destination = '{}/{}'.format(directory, ts)
if not os.path.isdir(destination):
os.mkdir(destination)
# iterate through the grid point data files
for f in gridfiles:
with open(f, 'rb') as p: g = pickle.load(p)
# dump each time series and get rid of time zone info
for ts in series:
data = [(datetime.datetime(t.year, t.month, t.day, t.hour), v)
for t, v in g.data[ts]]
data = [(t, v) for t, v in data if start <= t and t < end]
it = directory, ts, g.lon, g.lat
destination = '{}/{}/{:8.4f}_{:7.4f}'.format(*it)
if not os.path.isfile(destination):
with open(destination, 'wb') as f: pickle.dump(data, f)
def average_timeseries(directory,
variables = ['temperature', 'wind', 'humidity',
'solar', 'evaporation', 'snowdepth']):
averages = '{}/averages'.format(directory)
if not os.path.isdir(averages): os.mkdir(averages)
for v in variables:
destination = '{}/average_{}'.format(averages, v)
if not os.path.isfile(destination):
print('averaging {} timeseries...\n'.format(v))
series = []
source = '{}/{}'.format(directory, v)
for f in os.listdir(source):
p = '{}/{}'.format(source, f)
with open(p, 'rb') as d: ts, data = zip(*pickle.load(d))
series.append(numpy.array(data))
values = sum(series) / len(series)
average = [(t, va) for t, va in zip(ts, values)]
with open(destination, 'wb') as f: pickle.dump(average, f)
def extract_NRCM(source, destination, HUC8, start, end, plot = True):
"""Extracts the raw data from the regional climate model."""
extract_raw(source, destination, HUC8, plot = plot)
s = datetime.datetime(start, 1, 1)
e = datetime.datetime(end, 1, 1)
d = '{}/{}/NRCM'.format(destination, HUC8)
if not any([os.path.isdir('{}/{}'.format(d, ts))
for ts in ['rain', 'snowdepth', 'temperature', 'humidity',
'wind', 'solar', 'evaporation']]):
extract_timeseries(d, s, e)
| bsd-3-clause |
ebilionis/variational-reformulation-of-inverse-problems | unittests/test_optimize_catalysis_prop_noise_diff.py | 1 | 6076 | """
A first test for the ELBO on the catalysis problem.
The target consists of an uninformative prior and a Gaussian likelihood.
The approximating mixture has two components.
Author:
Panagiotis Tsilifis
Date:
6/6/2014
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import cPickle as pickle
from scipy.stats.distributions import norm
import math
from vuq import GammaPDF
from vuq import PDFCollection
from vuq import UninformativePDF
from vuq import ProportionalNoiseLikelihood
from vuq import LikelihoodCollection
from vuq import MultivariateNormal
from vuq import Joint
from vuq import MixturePDF
from vuq import MixtureOfMultivariateNormals
from vuq import FirstOrderEntropyApproximation
from vuq import ThirdOrderExpectationFunctional
from vuq import EvidenceLowerBound
from vuq import Optimizer
import sys
sys.path.insert(0,'demos/')
from catalysis import CatalysisModelDMNLY0 as CatalysisModel
# Number of dimensions
num_dim = 6
# The number of components to use for the mixture
num_comp = 1
#-------- The (hypothetical) joint distribution ----------------
# The prior
#collection = [GammaPDF(10, 1, 1) for i in xrange(num_dim-1) ]
#collection = np.hstack([collection, GammaPDF(5,0.1,1)])
#prior = PDFCollection(collection)
prior = UninformativePDF(num_dim)
# The data
data = np.loadtxt('data.txt').reshape((7, 6))
y = data[1:, 1:] / 500.
y = y.reshape((y.shape[0] * y.shape[1], ))
# The forward model
catal_model = CatalysisModel()
print 'Num_input'
print str(catal_model.num_input) + '\n'
# The isotropic Likelihood
like = ProportionalNoiseLikelihood(y, catal_model)
# The joint
log_p = Joint(like, prior)
print 'Target:'
print str(log_p)
# The approximating distribution
comp = [MultivariateNormal(np.random.gamma(10,1,num_dim))]#, MultivariateNormal(np.random.gamma(10,1,num_dim))]
log_q = MixtureOfMultivariateNormals(comp)
mu = np.random.rand(num_dim)
log_q.comp[0].mu = mu
log_q.comp[0].C = np.eye(num_dim) * 0.5
print 'Initial:'
print log_q
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Restrictions for mu
mu_bounds = (tuple((1e-6, None) for i in xrange(log_q.num_dim - 1))
+ ((1e-6, None), ))
C_bounds = tuple((1e-10, None) for i in xrange(log_q.num_comp * log_q.num_dim))
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
print 'ELBO:'
print str(elbo)
# Optimize the elbo
optimizer = Optimizer(elbo)
results_file = os.path.join('demos', 'catalysis_prop_noise_cali.pcl')
if os.path.exists(results_file):
print 'I found:', results_file
print 'I am skipping the experiment.'
print 'Delete the file if you want to repeat it.'
with open(results_file, 'rb') as fd:
results = pickle.load(fd)
L = results['L']
log_q = results['log_q']
else:
L = optimizer.optimize(log_q, max_it=10, mu_bounds=mu_bounds,
C_bounds=C_bounds)
result = {}
result['L'] = L
result['log_q'] = log_q
with open(os.path.join('demos', 'catalysis_prop_noise_cali.pcl'), 'wb') as fd:
pickle.dump(result, fd)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(L, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('ELBO', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'catalysis_prop_noise_elbo.png')
print 'Writing:', png_file
plt.savefig(png_file)
for i in xrange(log_q.num_dim):
mu = log_q.comp[0].mu[i]
s = math.sqrt(log_q.comp[0].C[i, i])
if i < 5:
name = 'kappa_{%s}' % (i+1)
else:
name = 'sigma^2'
print name, '=', mu, '+-', s
# Plot the calibration result
t = np.array([30., 60., 90., 120., 150., 180.]) / 180.
fig = plt.figure()
ax = fig.add_subplot(111)
m_state = catal_model(log_q.comp[0].mu[:5])
f = m_state['f']
Y = f.reshape(t.shape[0], f.shape[1] / t.shape[0])
styles = ['b', 'r', 'g', 'k', 'm']
for i in xrange(5):
ax.plot(t, Y[:, i], styles[i], linewidth=2)
ax.plot(t, data[1:, 1:][:, i] / 500., '+' + styles[i], markersize=10, markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'catalysis_prop_noise_cali_output.png')
print 'Writing:', png_file
plt.savefig(png_file)
# Do an uncertainty propagation test.
uq_file = os.path.join('demos', 'catalysis_prop_noise_cali_uq.pcl')
if os.path.exists(uq_file):
with open(uq_file, 'rb') as fd:
uq_results = pickle.load(fd)
Y_m = uq_results['Y_m']
Y_p05 = uq_results['Y_p05']
Y_p95 = uq_results['Y_p95']
else:
num_mcmc = 100
Y_s = []
for i in xrange(num_mcmc):
print 'taking sample', i + 1
omega = log_q.sample().flatten()
x = omega[:5]
sigma = omega[5]
y = catal_model(x)['f']
Y_s.append(y + sigma * y * np.random.randn(*y.shape))
Y_s = np.vstack(Y_s)
Y_m = np.percentile(Y_s, 50, axis=0).reshape(Y.shape)
Y_p05 = np.percentile(Y_s, 5, axis=0).reshape(Y.shape)
Y_p95 = np.percentile(Y_s, 95, axis=0).reshape(Y.shape)
uq_results = {}
uq_results['Y_m'] = Y_m
uq_results['Y_p05'] = Y_p05
uq_results['Y_p95'] = Y_p95
with open(uq_file, 'wb') as fd:
pickle.dump(uq_results, fd)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in xrange(5):
ax.plot(t, Y_m[:, i], styles[i], linewidth=2)
ax.fill_between(t, Y_p05[:, i], Y_p95[:, i], color=styles[i], alpha=0.5)
ax.plot(t, data[1:, 1:][:, i] / 500., '+' + styles[i], markersize=10,
markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'catalysis_prop_noise_cali_uq.png')
print 'Writing:', png_file
plt.savefig(png_file)
| gpl-2.0 |
allenh1/rosdep | test/test_rosdep_dependency_graph.py | 3 | 8384 | # Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author William Woodall/[email protected]
def test_DependencyGraph_Linear():
from rosdep2.dependency_graph import DependencyGraph
# Normal A-B-C
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
result = dg.get_ordered_dependency_list()
expected = [('c_installer', ['c']), ('b_installer', ['b']), ('a_installer', ['a'])]
assert result == expected, 'Results did not match expectations: %s == %s' % (str(result), str(expected))
def test_DependencyGraph_Cycle():
from rosdep2.dependency_graph import DependencyGraph
# Full Loop A-B-C-A-...
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = ['A']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an AssertionError because of the cycle."
except AssertionError as e:
if not str(e).startswith('A cycle in the dependency graph occurred with key'):
assert False, 'Throws AssertionError, but with the wrong message. Error was: %s: %s' % (type(e), str(e))
except Exception as e:
assert False, 'Throws and Exception, but not an AssertionError. Error was: %s: %s' % (type(e), str(e))
def test_DependencyGraph_Short_Cycle():
from rosdep2.dependency_graph import DependencyGraph
# Short cycle A-B-C-D-B-C-D-...
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = ['D']
dg['D']['installer_key'] = 'd_installer'
dg['D']['install_keys'] = ['d']
dg['D']['dependencies'] = ['B']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an AssertionError because of the cycle."
except AssertionError as e:
if not str(e).startswith('A cycle in the dependency graph occurred with key'):
assert False, 'Throws AssertionError, but with the wrong message. Error was: %s: %s' % (type(e), str(e))
except Exception as e:
assert False, 'Throws and Exception, but not an AssertionError. Error was: %s: %s' % (type(e), str(e))
def test_DependencyGraph_Invalid_Key():
from rosdep2.dependency_graph import DependencyGraph
# Invalid graph A-B-C where C doesn't exist
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an KeyError because of the invalid rosdep key."
except KeyError as e:
if not str(e).endswith("does not exist in the dictionary of resolutions.'"):
assert False, 'Throws KeyError, but with the wrong message. Error was: %s: %s' % (type(e), str(e))
except Exception as e:
assert False, 'Throws and Exception, but not an KeyError. Error was: %s: %s' % (type(e), str(e))
def test_DependencyGraph_Invalid_Key2():
from rosdep2.dependency_graph import DependencyGraph
# Invalid graph A-B-C where B doesn't exist
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
try:
result = dg.get_ordered_dependency_list()
assert False, "Doesn't fail, it should fail with an KeyError because of the invalid rosdep key."
except KeyError as e:
if not str(e).endswith("does not exist in the dictionary of resolutions.'"):
assert False, 'Throws KeyError, but with the wrong message. Error was: %s: %s' % (type(e), str(e))
except Exception as e:
assert False, 'Throws and Exception, but not an KeyError. Error was: %s: %s' % (type(e), str(e))
def test_DependencyGraph_Multi_Root():
from rosdep2.dependency_graph import DependencyGraph
# Multi root, shared dependency: A-B-C, D-C
dg = DependencyGraph()
dg['A']['installer_key'] = 'a_installer'
dg['A']['install_keys'] = ['a']
dg['A']['dependencies'] = ['B']
dg['B']['installer_key'] = 'b_installer'
dg['B']['install_keys'] = ['b']
dg['B']['dependencies'] = ['C']
dg['C']['installer_key'] = 'c_installer'
dg['C']['install_keys'] = ['c']
dg['C']['dependencies'] = []
dg['D']['installer_key'] = 'd_installer'
dg['D']['install_keys'] = ['d']
dg['D']['dependencies'] = ['C']
result = dg.get_ordered_dependency_list()
# TODO: The expected might also have a different order, for example it might be:
# [('c_installer', ['c']), ('d_installer', ['d']), ('b_installer', ['b']), ('a_installer', ['a'])]
# But that wont invalidate the order from a dependency graph stand point
expected = [
[('c_installer', ['c']), ('b_installer', ['b']), ('a_installer', ['a']), ('d_installer', ['d'])],
[('c_installer', ['c']), ('d_installer', ['d']), ('b_installer', ['b']), ('a_installer', ['a'])],
]
assert result in expected, 'Results did not match expectations: %s == %s' % (str(result), str(expected))
def test_DependencyGraph_Realworld():
from rosdep2.dependency_graph import DependencyGraph
# Real world example
dg = DependencyGraph()
dg['python-matplotlib']['installer_key'] = 'pip'
dg['python-matplotlib']['install_keys'] = ['matplotlib']
dg['python-matplotlib']['dependencies'] = ['pkg-config']
dg['pkg-config']['installer_key'] = 'homebrew'
dg['pkg-config']['install_keys'] = ['pkg-config']
dg['pkg-config']['dependencies'] = []
result = dg.get_ordered_dependency_list()
expected = [('homebrew', ['pkg-config']), ('pip', ['matplotlib'])]
assert result == expected, 'Results did not match expectations: %s == %s' % (str(result), str(expected))
| bsd-3-clause |
lys-coding/4gillian | One and Only Collective/pricelist/run.py | 1 | 1707 | #!/usr/local/bin/python3 -W ignore
import os
import pandas as pd
class PriceList:
def __init__(self):
self.number=[]
self.name=[]
self.price=[]
self.map={}
def from_file(self,filepath):
xl=pd.ExcelFile(filepath)
df=xl.parse(xl.sheet_names[0])
flag=False
for row in df.itertuples():
cur_number,cur_name,cur_price=str(row[2]).strip(' \t\r\n'),str(row[3]).strip(' \t\r\n'),str(row[4]).strip(' \t\r\n')
if (cur_number.isdigit()):
flag=True
if not flag or len(cur_number)==0:
continue
self.number.append(cur_number)
self.name.append(cur_name)
self.map[self.normalize(cur_number)]=cur_price
def normalize(self,s):
ns=s.strip(' \t\r\n').replace('\t',' ')
while ' ' in ns:
ns=ns.replace(' ',' ')
ns=list(ns)
for i in range(len(ns)):
if ns[i]>='A' and ns[i]<='Z':
ns[i]=chr(ord(ns[i])+ord('a')-ord('A'))
return ''.join(ns)
pricelist=PriceList()
pricelist.from_file('./PRCLSST1.xlsx')
df=pd.read_csv('./ITEM.CSV')
flag=False
index=-1
col_index=-1
for row in df.itertuples():
index+=1
for col in range(len(row)):
if str(row[col]).strip(' \t\r\n')=='Item Number':
col_index=col
flag=True
continue
if not flag:
continue
cur_number=str(row[col_index]).strip(' \t\r\n')
if pricelist.normalize(cur_number) in pricelist.map:
df.iat[index,1]=pricelist.map[pricelist.normalize(cur_number) ]
else:
df.iat[index,1]=''
print(cur_number)
df.to_csv('./result.csv')
| apache-2.0 |
greentfrapp/adversarialautoencoder | adversarialautoencoder.py | 1 | 22282 | """
Replicates the Adversarial Autoencoder architecture (Figure 1) from
Makhzani, Alireza, et al. "Adversarial autoencoders." arXiv preprint arXiv:1511.05644 (2015).
https://arxiv.org/abs/1511.05644
Refer to Appendix A.1 from paper for implementation details
"""
try:
raw_input
except:
raw_input = input
import argparse
import random
import tensorflow as tf
import numpy as np
import os
import datetime
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn import manifold
from sklearn.decomposition import PCA
from tensorflow.examples.tutorials.mnist import input_data
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
# DenseNetwork class creates a network with defined nodes, activations and layer names
# Encoder, decoder, discriminator etc. networks are based on this
class DenseNetwork(object):
def __init__(self, nodes_per_layer, activations_per_layer, names_per_layer, network_name):
self.name = network_name
self.layers = []
for layer_no, layer_name in enumerate(names_per_layer):
self.layers.append({
"name": layer_name,
"nodes": nodes_per_layer[layer_no],
"activation": activations_per_layer[layer_no]
})
return None
def forwardprop(self, input_tensor, reuse_variables=False):
if reuse_variables:
tf.get_variable_scope().reuse_variables()
with tf.name_scope(self.name):
tensor = input_tensor
for layer in self.layers:
tensor = tf.layers.dense(
inputs=tensor,
units=layer["nodes"],
activation=layer["activation"],
kernel_initializer=tf.truncated_normal_initializer(.0,.01),
name=layer["name"])
return tensor
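# Illustrative sketch (not part of the original script): a DenseNetwork wired
# like the encoder below maps a batch of flattened 28x28 images to a 2-D latent
# code. The placeholder shape and layer names here are assumptions made purely
# for demonstration.
#
#   sketch_net = DenseNetwork(
#       nodes_per_layer=[1000, 1000, 2],
#       activations_per_layer=[tf.nn.relu, tf.nn.relu, None],
#       names_per_layer=["sketch_dense_1", "sketch_dense_2", "sketch_output"],
#       network_name="SketchEncoder")
#   sketch_images = tf.placeholder(tf.float32, shape=[None, 784])
#   sketch_code = sketch_net.forwardprop(sketch_images)  # shape: (batch, 2)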
# AdversarialAutoencoder class
# creates its own tf.Session() for training and testing
class AdversarialAutoencoder(object):
def __init__(self, z_dim=2, batch_size=100, n_epochs=1000, results_folder='./Results'):
# Create results_folder
self.results_path = results_folder + '/AdversarialAutoencoder'
if not os.path.exists(self.results_path):
if not os.path.exists(results_folder):
os.mkdir(results_folder)
os.mkdir(self.results_path)
# Download data
self.mnist = input_data.read_data_sets('./Data', one_hot=True)
# Parameters for everything
self.img_width = 28
self.img_height = 28
self.img_dim = self.img_width * self.img_height
self.z_dim = z_dim
self.batch_size = batch_size
self.n_epochs = n_epochs
self.real_prior_mean = 0.0
self.real_prior_stdev = 5.0
self.learning_rate = 0.001
self.n_classes = 10
# Initialize networks
self.encoder = DenseNetwork(
nodes_per_layer=[1000, 1000, self.z_dim],
activations_per_layer=[tf.nn.relu, tf.nn.relu, None],
names_per_layer=["encoder_dense_1", "encoder_dense_2", "encoder_output"],
network_name="Encoder")
self.decoder = DenseNetwork(
nodes_per_layer=[1000, 1000, self.img_dim],
activations_per_layer=[tf.nn.relu, tf.nn.relu, tf.nn.sigmoid],
names_per_layer=["decoder_dense_1", "decoder_dense_2", "decoder_output"],
network_name="Decoder")
self.discriminator = DenseNetwork(
nodes_per_layer=[1000, 1000, 1],
activations_per_layer=[tf.nn.relu, tf.nn.relu, None],
names_per_layer=["discriminator_dense_1", "discriminator_dense_2", "discriminator_output"],
network_name="Discriminator")
# Create tf.placeholder variables for inputs
self.original_image = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, self.img_dim], name='original_image')
self.target_image = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, self.img_dim], name='target_image')
self.real_prior = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, self.z_dim], name='real_prior')
self.sample_latent_vector = tf.placeholder(dtype=tf.float32, shape=[1, self.z_dim], name='sample_latent_vector')
# Outputs from forwardproping networks
with tf.variable_scope(tf.get_variable_scope()):
self.latent_vector = self.encoder.forwardprop(self.original_image)
self.reconstruction = self.decoder.forwardprop(self.latent_vector)
self.score_real_prior = self.discriminator.forwardprop(self.real_prior)
self.score_fake_prior = self.discriminator.forwardprop(self.latent_vector, reuse_variables=True)
self.sample_image = self.decoder.forwardprop(self.sample_latent_vector, reuse_variables=True)
# Loss
self.reconstruction_loss = tf.reduce_mean(tf.square(self.target_image - self.reconstruction))
score_real_prior_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(self.score_real_prior), logits=self.score_real_prior))
score_fake_prior_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(self.score_fake_prior), logits=self.score_fake_prior))
self.discriminator_loss = score_real_prior_loss + score_fake_prior_loss
self.encoder_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(self.score_fake_prior), logits=self.score_fake_prior))
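# In words: the autoencoder term is the pixel-wise MSE between input and
# reconstruction; the discriminator is trained with the usual GAN
# sigmoid-cross-entropy objective to score samples from the Gaussian prior
# as real (label 1) and encoded vectors as fake (label 0); the encoder is then
# trained adversarially so that its codes score as real, pushing the
# distribution of latent vectors towards the imposed prior.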
# Filtering the variables to be trained
all_variables = tf.trainable_variables()
self.discriminator_variables = [var for var in all_variables if 'discriminator' in var.name]
self.encoder_variables = [var for var in all_variables if 'encoder' in var.name]
# Training functions
self.autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.reconstruction_loss)
self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.discriminator_loss, var_list=self.discriminator_variables)
self.encoder_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.encoder_loss, var_list=self.encoder_variables)
# Things to save in Tensorboard
self.input_images = tf.reshape(self.original_image, [-1, self.img_width, self.img_height, 1])
self.generated_images = tf.reshape(self.reconstruction, [-1, self.img_width, self.img_height, 1])
tf.summary.scalar(name="Autoencoder Loss", tensor=self.reconstruction_loss)
tf.summary.scalar(name="Discriminator Loss", tensor=self.discriminator_loss)
tf.summary.scalar(name="Encoder Loss", tensor=self.encoder_loss)
tf.summary.histogram(name="Encoder Distribution", values=self.latent_vector)
tf.summary.histogram(name="Real Distribution", values=self.real_prior)
tf.summary.image(name="Input Images", tensor=self.input_images, max_outputs=10)
tf.summary.image(name="Generated Images", tensor=self.generated_images, max_outputs=10)
self.summary_op = tf.summary.merge_all()
# Boilerplate Tensorflow stuff
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
self.saver = tf.train.Saver()
return None
# Creates the checkpoint folders
def load_checkpoint_folders(self, z_dim, batch_size, n_epochs):
folder_name = "/{0}_{1}_{2}_{3}_adversarial_autoencoder".format(
datetime.datetime.now(),
z_dim,
batch_size,
n_epochs).replace(':', '-')
tensorboard_path = self.results_path + folder_name + '/tensorboard'
saved_model_path = self.results_path + folder_name + '/saved_models/'
log_path = self.results_path + folder_name + '/log'
if not os.path.exists(self.results_path + folder_name):
os.mkdir(self.results_path + folder_name)
os.mkdir(tensorboard_path)
os.mkdir(saved_model_path)
os.mkdir(log_path)
return tensorboard_path, saved_model_path, log_path
# Samples a point from a normal distribution
def generate_sample_prior(self, mean, stdev):
return np.random.randn(self.batch_size, self.z_dim) * stdev + mean
# Returns the losses for logging
def get_loss(self, batch_x, z_real_dist):
a_loss, d_loss, e_loss, summary = self.sess.run([self.reconstruction_loss, self.discriminator_loss, self.encoder_loss, self.summary_op], feed_dict={self.original_image:batch_x, self.target_image:batch_x, self.real_prior:z_real_dist})
return (a_loss, d_loss, e_loss, summary)
# Loads most recent saved model
def load_last_saved_model(self, model_directory=None):
if model_directory is None:
all_results = os.listdir(self.results_path)
all_results.sort()
if tf.train.latest_checkpoint(self.results_path + '/' + all_results[-1] + '/saved_models/') is None:
print("No saved model found.")
quit()
model_directory = self.results_path + '/' + all_results[-1] + '/saved_models/'
self.saver.restore(self.sess, save_path=tf.train.latest_checkpoint(model_directory))
return None
# Train
def train(self):
self.step = 0
self.tensorboard_path, self.saved_model_path, self.log_path = self.load_checkpoint_folders(self.z_dim, self.batch_size, self.n_epochs)
self.writer = tf.summary.FileWriter(logdir=self.tensorboard_path, graph=self.sess.graph)
for epoch in range(1, self.n_epochs + 1):
n_batches = int(self.mnist.train.num_examples / self.batch_size)
print("------------------Epoch {}/{}------------------".format(epoch, self.n_epochs))
for batch in range(1, n_batches + 1):
z_real_dist = self.generate_sample_prior(self.real_prior_mean, self.real_prior_stdev)
batch_x, _ = self.mnist.train.next_batch(self.batch_size)
autoencoder_learning_rate = 0.001
discriminator_learning_rate = 0.001
encoder_learning_rate = 0.001
self.sess.run(self.autoencoder_optimizer, feed_dict={self.original_image:batch_x, self.target_image:batch_x})
self.sess.run(self.discriminator_optimizer, feed_dict={self.original_image: batch_x, self.target_image: batch_x, self.real_prior: z_real_dist, })
self.sess.run(self.encoder_optimizer, feed_dict={self.original_image: batch_x, self.target_image: batch_x})
# Print log and write to log.txt every 50 batches
if batch % 50 == 0:
a_loss, d_loss, e_loss, summary = self.get_loss(batch_x, z_real_dist)
self.writer.add_summary(summary, global_step=self.step)
print("Epoch: {}, iteration: {}".format(epoch, batch))
print("Autoencoder Loss: {}".format(a_loss))
print("Discriminator Loss: {}".format(d_loss))
print("Generator Loss: {}".format(e_loss))
with open(self.log_path + '/log.txt', 'a') as log:
log.write("Epoch: {}, iteration: {}\n".format(epoch, batch))
log.write("Autoencoder Loss: {}\n".format(a_loss))
log.write("Discriminator Loss: {}\n".format(d_loss))
log.write("Generator Loss: {}\n".format(e_loss))
self.step += 1
self.saver.save(self.sess, save_path=self.saved_model_path, global_step=self.step)
print("Model Trained!")
print("Tensorboard Path: {}".format(self.tensorboard_path))
print("Log Path: {}".format(self.log_path + '/log.txt'))
print("Saved Model Path: {}".format(self.saved_model_path))
return None
# Generate a single sample image
def generate_sample_image(self, sample_latent_vector=None, title=None):
if sample_latent_vector is None:
sample_latent_vector = np.zeros(self.z_dim)
elif len(sample_latent_vector) < self.z_dim:
print("Insufficient dimensions for latent vector, appending zeros...")
sample_latent_vector = np.concatenate((sample_latent_vector, np.zeros(self.z_dim - len(sample_latent_vector))))
elif len(sample_latent_vector) > self.z_dim:
print("Too many dimensions for latent vector, shortening vector...")
sample_latent_vector = sample_latent_vector[:self.z_dim]
print("Generating image for latent vector: {} of length {}".format(sample_latent_vector, np.linalg.norm(sample_latent_vector)))
scale_x = 4.
scale_y = 4.
fig = plt.figure(figsize=(scale_x, scale_y))
gs = gridspec.GridSpec(1, 1)
z = np.reshape(sample_latent_vector, (1, self.z_dim))
x = self.sess.run(self.sample_image, feed_dict={self.sample_latent_vector: z})
ax = plt.Subplot(fig, gs[0])
img = np.array(x.tolist()).reshape(self.img_width, self.img_height)
ax.imshow(img, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('equal')
if title is None:
title = str(sample_latent_vector)
ax.set_title(title)
fig.add_subplot(ax)
plt.show(block=False)
return None
# Generate a grid of sample images
def generate_sample_image_grid(self, n_x=10, x_range=[-10, 10], n_y=10, y_range=[-10, 10]):
x_points = np.linspace(x_range[0], x_range[1], n_x).astype(np.float32)
y_points = np.linspace(y_range[0], y_range[1], n_y).astype(np.float32)[::-1] # reverses y_points so that graph is negative at bottom
scale_x = 8. / n_x
scale_y = 8. / n_y
fig = plt.figure(figsize=(n_x*scale_x, n_y*scale_y))
gs = gridspec.GridSpec(n_y, n_x, wspace=0.0, hspace=0.0)
for i, g in enumerate(gs):
z = np.concatenate(([x_points[int(i % n_x)]], [y_points[int(i / n_x)]], np.zeros(self.z_dim - 2)))
z = np.reshape(z, (1, 2))
x = self.sess.run(self.sample_image, feed_dict={self.sample_latent_vector: z})
ax = plt.Subplot(fig, g)
img = np.array(x.tolist()).reshape(self.img_width, self.img_height)
ax.imshow(img, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('equal')
fig.add_subplot(ax)
plt.show(block=False)
return None
# Encodes images and plots the encodings
def plot_latent_vectors(self, dataset_x=None, dataset_y=None, n_test=10000):
if dataset_x is None or dataset_y is None:
print('Loading {} images from MNIST test data'.format(n_test))
dataset_x, dataset_y = self.mnist.test.next_batch(n_test)
colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
n_datapoints = len(dataset_x)
fig, ax = plt.subplots()
n_batch = int(n_datapoints / self.batch_size)
batch_no = 0
plot_data = {}
while batch_no < n_batch:
batch_x = dataset_x[batch_no * self.batch_size:(batch_no + 1) * self.batch_size]
batch_y = dataset_y[batch_no * self.batch_size:(batch_no + 1) * self.batch_size]
batch_z = self.sess.run(self.latent_vector, feed_dict={self.original_image: batch_x})
for i, z in enumerate(batch_z):
if batch_y[i].argmax() not in plot_data:
plot_data[batch_y[i].argmax()] = {'x':[], 'y':[]}
plot_data[batch_y[i].argmax()]['x'].append(z[0])
plot_data[batch_y[i].argmax()]['y'].append(z[1])
batch_no += 1
for label, data in plot_data.items():
ax.scatter(data['x'], data['y'], c=colors[label], label=label, edgecolors='none')
ax.legend()
plt.show(block=False)
return None
def plot_high_dim(self, dataset_x=None, dataset_y=None, n_test=10000, custom_latent_vectors=[]):
if dataset_x is None or dataset_y is None:
print('Loading {} images from MNIST test data'.format(n_test))
dataset_x, dataset_y = self.mnist.test.next_batch(n_test, shuffle=False)
colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", "#FFFFFF"]
edgecolors = ["none", "none", "none", "none", "none", "none", "none", "none", "none", "none", "#000000"]
n_datapoints = len(dataset_x)
fig, ax = plt.subplots()
n_batch = int(n_datapoints / self.batch_size)
batch_no = 0
all_z = None
plot_data = {}
while batch_no < n_batch:
batch_x = dataset_x[batch_no * self.batch_size:(batch_no + 1) * self.batch_size]
batch_y = dataset_y[batch_no * self.batch_size:(batch_no + 1) * self.batch_size]
batch_z = self.sess.run(self.latent_vector, feed_dict={self.original_image: batch_x})
if all_z is None:
all_z = batch_z
else:
all_z = np.concatenate((all_z, batch_z))
batch_no += 1
pca = PCA(n_components=2, random_state=1)
pca.fit(all_z)
transformed_z = pca.transform(all_z)
for i, z in enumerate(transformed_z):
if dataset_y[i].argmax() not in plot_data:
plot_data[dataset_y[i].argmax()] = {'x':[], 'y':[]}
plot_data[dataset_y[i].argmax()]['x'].append(z[0])
plot_data[dataset_y[i].argmax()]['y'].append(z[1])
for label, data in plot_data.items():
ax.scatter(data['x'], data['y'], c=colors[label], label=label, edgecolors=edgecolors[label])
if len(custom_latent_vectors) > 0:
transformed_vectors = pca.transform(custom_latent_vectors)
custom_x = []
custom_y = []
for i, z in enumerate(transformed_vectors):
custom_x.append(z[0])
custom_y.append(z[1])
ax.scatter(custom_x, custom_y, c=colors[-1], label='Custom', edgecolors=edgecolors[-1])
ax.legend()
plt.show(block=False)
new_pca_vector = None
while True:
new_pca_vector = raw_input("New point: ")
if new_pca_vector == "q":
break
#self.pca2img(pca, eval(new_pca_vector), n_variants=5)
self.pca2img(pca, eval(new_pca_vector))
return None
def pca2img(self, pca, pca_vector, n_variants=0):
pca_vector = np.array(pca_vector).reshape(1,2)
z = pca.inverse_transform(pca_vector)
if n_variants > 0:
z_variants = self.pca_inverse_variant(pca, z[0], n=n_variants)
for variant in z_variants:
self.generate_sample_image(sample_latent_vector=variant, title="PCA: {} Magnitude: {}".format(pca_vector, np.linalg.norm(variant)))
else:
#self.generate_sample_image(sample_latent_vector=z[0], title=str(pca_vector))
z_variant_min = self.pca_inverse_variant_min(pca, z[0])
self.generate_sample_image(sample_latent_vector=z_variant_min, title="PCA: {} Magnitude: {}".format(pca_vector, np.linalg.norm(z_variant_min)))
return None
def pca_inverse_variant(self, pca, z, n=10, min_mag=3, max_mag=5):
vector_a = pca.components_[0]
vector_b = pca.components_[1]
scale = vector_a[-1] / vector_b[-1]
ortho = np.random.random(len(vector_a))
n = int(n / 2)
sum_a = 0
sum_b = 0
for i in range(len(vector_a) - 2):
sum_a += vector_a[i] * ortho[i]
sum_b += vector_b[i] * ortho[i]
ortho[-2] = - (sum_a - scale * sum_b) / (vector_a[-2] - scale * vector_b[-2])
ortho[-1] = - (sum_a + vector_a[-2] * ortho[-2]) / vector_a[-1]
initial_scale = np.roots([np.linalg.norm(ortho)**2,2 * np.dot(ortho,z),np.linalg.norm(z)**2 - min_mag**2])
while np.sum(np.iscomplex(initial_scale)) != 0:
min_mag += 1
print("Unable to comply with minimum magnitude; increasing minimum magnitude to {}".format(min_mag))
if min_mag >= max_mag:
max_mag = min_mag + 1
print("Minimum magnitude exceeds maximum magnitude; increasing maximum magnitude to {}".format(max_mag))
initial_scale = np.roots([np.linalg.norm(ortho)**2,2 * np.dot(ortho,z),np.linalg.norm(z)**2 - min_mag**2])
mag_step = (max_mag - min_mag) / (n - 1.0)
mag_values = np.arange(min_mag,max_mag + mag_step / 10.0, mag_step)
scales = [initial_scale[np.argmax(initial_scale)],initial_scale[np.argmin(initial_scale)]]
for mag in mag_values[1:]:
roots = np.roots([np.linalg.norm(ortho)**2,2 * np.dot(ortho,z),np.linalg.norm(z)**2 - mag**2])
scales.append(roots[np.argmax(roots)])
scales.append(roots[np.argmin(roots)])
batch_z = []
for scale in scales:
batch_z.append(z + scale * ortho)
return batch_z
def pca_inverse_variant_min(self, pca, z):
vector_a = pca.components_[0]
vector_b = pca.components_[1]
scale = vector_a[-1] / vector_b[-1]
ortho = np.random.random(len(vector_a))
sum_a = 0
sum_b = 0
for i in range(len(vector_a) - 2):
sum_a += vector_a[i] * ortho[i]
sum_b += vector_b[i] * ortho[i]
ortho[-2] = - (sum_a - scale * sum_b) / (vector_a[-2] - scale * vector_b[-2])
ortho[-1] = - (sum_a + vector_a[-2] * ortho[-2]) / vector_a[-1]
min_scale = - (np.dot(z, ortho) / np.linalg.norm(ortho))
new_z = z + min_scale * ortho
return new_z
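# Illustrative programmatic usage (a sketch; the argparse CLI in main() below
# is the intended entry point):
#
#   model = AdversarialAutoencoder(z_dim=2)
#   model.train()                       # or model.load_last_saved_model()
#   model.generate_sample_image(sample_latent_vector=[1.0, -2.0])
#   model.generate_sample_image_grid(n_x=10, x_range=[-10, 10],
#                                    n_y=10, y_range=[-10, 10])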
def main():
parser = argparse.ArgumentParser(
description="Replicates the Adversarial Autoencoder architecture, refer to https://arxiv.org/abs/1511.05644",
epilog="Use --train to train a model, followed by --sample/--samplegrid/--plot to generate sample images or plot encoded vectors.")
parser.add_argument('--train',
action='store_true',
default=False,
help='Train a model')
parser.add_argument('--sample',
action='store_true',
default=False,
help='Sample a single image')
parser.add_argument('-z', '--latent_vector',
action='store',
default=[0, 0],
help='Sample latent vector (default [0,0])')
parser.add_argument('--samplegrid',
action='store_true',
default=False,
help='Sample a grid of images')
parser.add_argument('-rz1', '--range_z1',
action='store',
default=[-10, 10],
help='Range of z1 values (default [-10,10])')
parser.add_argument('-rz2', '--range_z2',
action='store',
default=[-10, 10],
help='Range of z2 values (default [-10,10])')
parser.add_argument('-nz1', '--no_steps_z1',
action='store',
default=10,
help='Number of z1 values (default 10)')
parser.add_argument('-nz2', '--no_steps_z2',
action='store',
default=10,
help='Number of z2 values (default 10)')
parser.add_argument('--plot',
action='store_true',
default=False,
help='Convert images to latent vectors and plot vectors')
parser.add_argument('--plot_hi',
action='store_true',
default=False,
help='Convert images to latent vectors and plot vectors via a 2-D PCA projection')
parser.add_argument('-i', '--no_images',
action='store',
default=10000,
help='Number of images to plot (default 10000)')
parser.add_argument('--custom_latent_vectors',
action='store',
default='[]',
help='Set of latent vectors to highlight in the PCA plot')
parser.add_argument('--z_dim',
action='store',
default=2,
help='Number of dimensions for latent vector')
args = parser.parse_args()
if args.train:
model = AdversarialAutoencoder(z_dim=int(args.z_dim))
model.train()
elif args.sample:
model = AdversarialAutoencoder(z_dim=int(args.z_dim))
model.load_last_saved_model()
model.generate_sample_image(sample_latent_vector=eval(args.latent_vector))
elif args.samplegrid:
if isinstance(args.range_z1, str):
args.range_z1 = eval(args.range_z1)
if isinstance(args.range_z2, str):
args.range_z2 = eval(args.range_z2)
model = AdversarialAutoencoder(z_dim=int(args.z_dim))
model.load_last_saved_model()
model.generate_sample_image_grid(
n_x=int(args.no_steps_z1),
x_range=args.range_z1,
n_y=int(args.no_steps_z2),
y_range=args.range_z2)
elif args.plot:
model = AdversarialAutoencoder(z_dim=int(args.z_dim))
model.load_last_saved_model()
model.plot_latent_vectors(n_test=int(args.no_images))
elif args.plot_hi:
model = AdversarialAutoencoder(z_dim=int(args.z_dim))
model.load_last_saved_model()
model.plot_high_dim(n_test=int(args.no_images), custom_latent_vectors=eval(args.custom_latent_vectors))
else:
parser.print_help()
raw_input("Hit Enter To Close")
if __name__ == "__main__":
main()
| mit |
actuaryzhang/spark | python/pyspark/sql/utils.py | 5 | 7527 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace, cause=None):
self.desc = desc
self.stackTrace = stackTrace
self.cause = convert_exception(cause) if cause is not None else None
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
class UnknownException(CapturedException):
"""
None of the above exceptions.
"""
def convert_exception(e):
s = e.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), e.getStackTrace()))
c = e.getCause()
if s.startswith('org.apache.spark.sql.AnalysisException: '):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
return ParseException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
return StreamingQueryException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
return QueryExecutionException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('java.lang.IllegalArgumentException: '):
return IllegalArgumentException(s.split(': ', 1)[1], stackTrace, c)
return UnknownException(s, stackTrace, c)
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
converted = convert_exception(e.java_exception)
if not isinstance(converted, UnknownException):
raise converted
else:
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
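# Illustrative sketch (assumes an active SparkSession named `spark`): with the
# handler installed, a failing query surfaces as a Python AnalysisException
# rather than a raw py4j.protocol.Py4JJavaError.
#
#   install_exception_handler()
#   try:
#       spark.sql("SELECT no_such_column FROM some_table")
#   except AnalysisException as e:
#       print(e.desc)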
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
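# Illustrative sketch (assumes an active SparkContext `sc`):
#   jarr = toJArray(sc._gateway, sc._gateway.jvm.java.lang.String, ["a", "b"])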
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
def require_test_compiled():
""" Raise Exception if test classes are not compiled
"""
import os
import glob
try:
spark_home = os.environ['SPARK_HOME']
except KeyError:
raise RuntimeError('SPARK_HOME is not defined in environment')
test_class_path = os.path.join(
spark_home, 'sql', 'core', 'target', '*', 'test-classes')
paths = glob.glob(test_class_path)
if len(paths) == 0:
raise RuntimeError(
"%s doesn't exist. Spark sql test classes are not compiled." % test_class_path)
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
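# Illustrative sketch (assumes a streaming DataFrame `sdf`): user code normally
# reaches this wrapper indirectly through DataStreamWriter.foreachBatch, e.g.
#
#   def handle_batch(batch_df, batch_id):
#       batch_df.write.mode("append").parquet("/tmp/batches/%s" % batch_id)
#
#   query = sdf.writeStream.foreachBatch(handle_batch).start()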
def to_str(value):
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
| apache-2.0 |
bikong2/scikit-learn | sklearn/utils/estimator_checks.py | 21 | 51976 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
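# Illustrative usage (a sketch; in this version of the API the estimator class
# itself is passed in, not an instance):
#
#   from sklearn.linear_model import LogisticRegression
#   check_estimator(LogisticRegression)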
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely to
        # prevent convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
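# Illustrative sketch (not part of the checks; assumes sklearn.externals.joblib
# is available, as elsewhere in scikit-learn of this era): joblib.hash computes
# a content checksum even for unhashable objects such as numpy arrays, which is
# the kind of recursive checksum the comment above describes.
def _example_param_checksum_detects_mutation():
    from sklearn.externals import joblib
    value = np.array([1, 2, 3])
    checksum_before = joblib.hash(value)
    value[0] = 99                                  # in-place mutation
    return checksum_before == joblib.hash(value)   # expected: False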
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
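# Minimal usage sketch (illustrative only, not part of the test suite): the
# invariant asserted above means the deep params are a superset of the
# shallow params.
def _example_get_params_invariance():
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import LogisticRegression
    pipe = Pipeline([("clf", LogisticRegression())])
    shallow = set(pipe.get_params(deep=False))  # e.g. {'steps', ...}
    deep = set(pipe.get_params(deep=True))      # adds 'clf', 'clf__C', ...
    return shallow <= deep                      # expected: True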
| bsd-3-clause |
brunston/stellarpyl | text.py | 3 | 12212 | # -*- coding: utf-8 -*-
"""
stellarPYL - python stellar spectra processing software
Copyright (c) 2016 Brunston Poon
@file: text
This program comes with absolutely no warranty.
"""
import time
def welcome():
print("""
Welcome to stellarPYL, Copyright (C) 2015 Brunston Poon, type 'licence' for info
Type 'quit' or 'exit' to leave the program. Use ctrl-c to force-interrupt.
TO VIEW HELP, WHICH WILL DESCRIBE A TYPICAL WORKFLOW SCENARIO, TYPE 'help'.
TO VIEW A LIST OF AVAILABLE FUNCTIONS & COMMANDS, TYPE 'commands'.
TO LEARN MORE ABOUT THIS PROGRAM, TYPE 'about'.
Help and information is also available online at http://st.bpbp.xyz/
or by viewing README.md
""")
return None
def firstrun():
print("""
SINCE this is your first time running the program, please take the time to read
the about, help, and commands documentation to familiarize yourself with this
program. For your convenience, press enter to view the commands now.
""")
return None
def about():
print("""
This is stellar spectra reduction and analysis command-line software written
using the Python 3.4 version of the Anaconda Scientific Python distribution. It
is work done for an internship at the University of Hawaii in conjunction with
the St. Paul's School Engineering Honors program.
It aims to provide a simplified workflow for analyzing uncompressed TIFF stellar
spectra images obtained from a DSLR through a diffraction grating. The goals of
the project are: to automatically crop the image; to perform background
subtraction; to create an intensity plot of the spectrum (accounting for non-
orthogonal spectra); and to account for the use of a DSLR sensor by using either
a relative response function or an absolute response function to normalize the
intensity plot.
It is written by Brunston Poon.
""")
return None
def help():
print("""
AS AN ALTERNATIVE TO THE BELOW, make sure you set a default threshold using
'settings_threshold', and then simply type 'auto' to have the program do
the majority of the work.
You will be presented with a list of commands.
For a brand new image, run 'crop' first. Drag your file into the same directory
and enter the filename including the file extension. This program will accept
TIFF files, either in .tif or .tiff extension format. It will then ask you for a
threshold.
The threshold is used throughout the program to determine what data is relevant
and what parts of the image can be discarded without damaging the value of the
data. It needs to be an integer value between 0 and 765 because the threshold is
measured as the sum of the R, G, and B bin values in a pixel: each RGB value
can be an integer from 0-255, so the total can range from 0-765. If
you do not have a value you are already using for all of your images, you can
type 'pixel_d' at the command prompt to run a function that plots the
distribution of binned pixel values in your image.
A typical threshold may be in the range from 100-130.
The program will run the cropping algorithm and ask for a filename to give to
the new file.
The next command you should run is 'intensity_saa'. It will take an image file
and a threshold and automatically perform linear regression to find the y=mx+b
line on which the spectral trace lies. It will then step one pixel at a time
along the spectral trace and add up all intensity values occurring along that
line.
The program will graph this intensity plot, which can be saved using the tools
already provided by matplotlib.
""")
return None
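# A minimal illustrative sketch (not part of stellarPYL's processing code; the
# helper name and the numpy dependency are assumptions): it shows how the
# 0-765 threshold described in help() can be interpreted for an RGB image
# stored as a numpy array of shape (height, width, 3).
def _example_signal_mask(rgb_array, threshold):
    import numpy as np
    # sum the R, G and B bins per pixel; totals range from 0 to 765
    summed = np.asarray(rgb_array, dtype=np.int32).sum(axis=2)
    # pixels above the threshold are treated as spectrum data; the rest can
    # be cropped or background-subtracted away
    return summed > threshold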
def commands():
print("""
---IMAGE PROCESSING---
- 'autoProcess' (short 'auto') -
autoProcess will take care of cropping and doing intensity plotting for you.
just provide a filename. In order to use this feature you must first set
a default threshold to use by using the 'settings_threshold' command.
- 'pixel_d' (short 'pd') -
takes an image and shows the pixel distribution of the image over the intensity
of the pixels.
- 'crop' -
takes an image and crop it based on your selected threshold.
- 'image_regression' (short 'imgreg') -
takes an image and finds the line which goes through the spectrum in that image.
- 'intensity_n' (short 'n') -
takes an image of a spectrum and converts it into an intensity plot using the
naive method of adding.
- 'intensity_saa' (short 'saa') -
takes an image of a spectrum and converts it into an intensity plot using
spatial anti-aliasing at a sub-sampling rate of one tenth of one pixel.
- 'show_threshold' -
see exactly what could be removed (assuming no crop stop has been set) using the
threshold that is currently set.
- 'show_regression' -
shows regressed line overlayed on the original (cropped) image.
- 'show_walks' -
shows walking lines overlayed on the original (cropped) image.
- 'dev_cgrowth' -
plots curve of growth
---PROGRAM---
- 'about' -
displays information about this program
- 'functions' -
where you are now
- 'help' -
brings up sample workflow
- 'settings_cropoverride' -
sets manual overrides for automatic cropping on the top, bottom, and sides
of an image. The default value is -1 (which is equivalent to no override)
for all values.
- 'settings_default' -
returns ALL settings back to default:
defaultThreshold = -1
autoIntensity = saa
manual overrides all to -1
step = 1
verbose = yes
showthresh = yes
- 'settings_intensity' -
sets default intensity processing method for the autoProcess feature.
The default setting is saa (for spatial anti-aliasing).
- 'settings_margin' -
sets margin for cropping. default is 5 pixels.
- 'settings_showthreshold' -
showThreshold takes a while to run. Set to 'no' for a faster autoProcess
run time. Default is 'yes'
- 'settings_step'
sets default step value along the spectral trace (and thus resolution of
resulting intensity plot). default is 1 pixel-equivalence.
- 'settings_threshold' -
sets default threshold. Set to -1 if you would like the program to always ask.
The default setting is -1 (always asks).
- 'settings_verbose' -
sets verboseness. 'yes' to include debug statements, 'no' is default.
- 'view_settings' -
view your current settings
""")
return None
def rehash():
print("""
Type 'quit' or 'exit' to leave this program. Alternately, you may use
ctrl-c to force-interrupt at any time. Type 'help' for sample workflow,
'commands' for a list of functions and commands, and 'about' for more info.
""")
return None
def viewSettings(config):
print("Current settings:")
print("default threshold: ", config['CONTROL']['defaultthreshold'])
print("autoIntensity: ", config['CONTROL']['autointensity'])
print("manual override top crop:", config['CONTROL']['manualtop'])
print("manual override bottom crop:", config['CONTROL']['manualbot'])
print("manual override left crop:", config['CONTROL']['manualleft'])
print("manual override right crop:", config['CONTROL']['manualright'])
print("step:", config['CONTROL']['r'])
print("verbose:", config['CONTROL']['verbose'])
print("showthresh:", config['CONTROL']['showthresh'])
print("margin:",config['CONTROL']['margin'])
return None
def licence():
print("""
This program comes with absolutely no warranty. This is libre/gratis software,
and you are welcome to redistribute it under certain conditions.
""")
time.sleep(3)
print("""
stellarPYL is copyright (c) 2015 Brunston Poon
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
THE REST OF THE LICENCE TEXT IS VIEWABLE IN LICENCE.txt
stellarPYL is copyright (c) 2015 Brunston Poon.
""")
return None
def jellyfish():
print("""
(hello!)
.'
'
_ -- ~~~ -- _ _______
.-~ ~-.{__-----. :
/ \ | |
: O O : | |
/\ /------' j
{ {/~-. \__/ .-~\~~~~~~~~~
\/ / |~:- .___. -.~\ \ \.
/ /\ \ | | { { \ \ } } \ \.
{ { \ \ | \ \ \ \ / } }
\ \ /\ \ \ \ /\ \ { {
} } { { \ \ \ \/ / \ \ \ \.
/ / } } \ \ }{ { \ \ } }
/ / { { \ \{\ \ } { {
/ / } } } \ \ / / \ \ \.
`-' { { `-'\ \`-'/ / `-'
`-' `-' `-'
unknown artist
""")
return "jellyfish"
| gpl-3.0 |
vishnumani2009/OpenSource-Open-Ended-Statistical-toolkit | FRONTEND/anovafront.py | 1 | 9167 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'anova.ui'
#
# Created: Sat Apr 11 09:33:51 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(238, 435)
self.scale=0.4
self.test="F"
self.fname=""
self.type=1
self.robust="hc3"
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 221, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(10, 80, 221, 231))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.pushButton = QtGui.QPushButton(self.groupBox_2)
self.pushButton.setGeometry(QtCore.QRect(20, 20, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.doubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox_2)
self.doubleSpinBox.setGeometry(QtCore.QRect(120, 50, 62, 22))
self.doubleSpinBox.setMaximum(999999.99)
self.doubleSpinBox.setObjectName(_fromUtf8("doubleSpinBox"))
self.doubleSpinBox.valueChanged.connect(self.setscale)
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setGeometry(QtCore.QRect(50, 50, 46, 13))
self.label.setObjectName(_fromUtf8("label"))
self.comboBox = QtGui.QComboBox(self.groupBox_2)
self.comboBox.setGeometry(QtCore.QRect(110, 80, 69, 22))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.activated[str].connect(self.settest)
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setGeometry(QtCore.QRect(40, 80, 46, 13))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setGeometry(QtCore.QRect(40, 120, 46, 13))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.comboBox_2 = QtGui.QComboBox(self.groupBox_2)
self.comboBox_2.setGeometry(QtCore.QRect(110, 120, 69, 22))
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.activated[str].connect(self.settype)
self.label_4 = QtGui.QLabel(self.groupBox_2)
self.label_4.setGeometry(QtCore.QRect(40, 160, 46, 13))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.comboBox_3 = QtGui.QComboBox(self.groupBox_2)
self.comboBox_3.setGeometry(QtCore.QRect(110, 160, 69, 22))
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.activated[str].connect(self.sethc)
self.checkBox = QtGui.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(50, 190, 131, 17))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(40, 320, 161, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(40, 360, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_3.clicked.connect(self.startanova)
self.pushButton_2.clicked.connect(self.takeinput)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def sethc(self,txt):
self.robust=str(txt)
print self.robust
def settest(self,txt):
self.test=str(txt)
print self.test
def settype(self,txt):
self.type=int(txt)
print self.type
def setscale(self):
self.scale=self.doubleSpinBox.value()
print self.scale
def takeinput(self):
self.fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
from urllib2 import urlopen
import numpy as np
import pandas
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.api import interaction_plot, abline_plot
from statsmodels.stats.anova import anova_lm
try:
rehab_table = pandas.read_csv('rehab.table')
except:
url = 'http://stats191.stanford.edu/data/rehab.csv'
            # the next line is not necessary with a recent version of pandas
url = urlopen(url)
rehab_table = pandas.read_table(url, delimiter=",")
rehab_table.to_csv('rehab.table')
print rehab_table
plt.figure(figsize=(6, 6));
rehab_table.boxplot('Time', 'Fitness', ax=plt.gca())
rehab_lm = ols('Time ~ C(Fitness)', data=rehab_table).fit()
table9 = anova_lm(rehab_lm,test=self.test,robust=self.robust)
print table9
print rehab_lm.model.data.orig_exog
print rehab_lm.summary()
plt.show()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Statistical estimator", None))
self.lineEdit.setText(_translate("Form", "ANOVA", None))
self.groupBox_2.setTitle(_translate("Form", "Options", None))
self.pushButton.setText(_translate("Form", "Model equation file", None))
self.label.setText(_translate("Form", "Scale", None))
self.comboBox.setItemText(0, _translate("Form", "F", None))
self.comboBox.setItemText(1, _translate("Form", "Cp", None))
self.comboBox.setItemText(2, _translate("Form", "ChiSq", None))
self.label_2.setText(_translate("Form", "Test", None))
self.label_3.setText(_translate("Form", "Type", None))
self.comboBox_2.setItemText(0, _translate("Form", "1", None))
self.comboBox_2.setItemText(1, _translate("Form", "2", None))
self.comboBox_2.setItemText(2, _translate("Form", "3", None))
self.label_4.setText(_translate("Form", "Robust", None))
        self.comboBox_3.setItemText(0, _translate("Form", "None", None))
self.comboBox_3.setItemText(1, _translate("Form", "hc0", None))
self.comboBox_3.setItemText(2, _translate("Form", "hc1", None))
self.comboBox_3.setItemText(3, _translate("Form", "hc2", None))
self.comboBox_3.setItemText(4, _translate("Form", "hc3", None))
self.checkBox.setText(_translate("Form", " print to a file", None))
self.pushButton_2.setText(_translate("Form", "Input File", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
def startanova(self):
from urllib2 import urlopen
import numpy as np
import pandas
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.api import interaction_plot, abline_plot
from statsmodels.stats.anova import anova_lm
try:
rehab_table = pandas.read_csv('rehab.table')
except:
url = 'http://stats191.stanford.edu/data/rehab.csv'
            # the next line is not necessary with a recent version of pandas
url = urlopen(url)
rehab_table = pandas.read_table(url, delimiter=",")
rehab_table.to_csv('rehab.table')
print rehab_table
plt.figure(figsize=(6, 6));
rehab_table.boxplot('Time', 'Fitness', ax=plt.gca())
rehab_lm = ols('Time ~ C(Fitness)', data=rehab_table).fit()
table9 = anova_lm(rehab_lm,test=self.test,robust=self.robust)
print table9
print rehab_lm.model.data.orig_exog
print rehab_lm.summary()
plt.show()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| gpl-3.0 |
jseabold/scikit-learn | sklearn/datasets/species_distributions.py | 64 | 7917 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
    if nodata != -9999:
        # replace the file's no-data marker with the canonical -9999 sentinel
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
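# A minimal usage sketch (illustrative, not part of the library): fetch the
# data bunch and rebuild the latitude/longitude grid; the attribute and field
# names follow the docstrings above.
if __name__ == "__main__":
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    print("coverages shape:", data.coverages.shape)      # (14, 1592, 1212)
    print("first training species:", data.train["species"][0])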
| bsd-3-clause |
ssaeger/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 25 | 2252 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
jbosboom/opentuner | stats_app/stats_app/views/charts.py | 6 | 1669 | import datetime
import django
from django.shortcuts import render
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.dates import DateFormatter
from matplotlib.figure import Figure
import random
from opentuner.utils import stats_matplotlib as stats
def display_graph(request):
"""
Handles request to display graph with provided parameters
"""
request_dict = dict(request.GET.iterlists())
xlim = request_dict.get('xlim', None)
if xlim:
xlim = int(xlim[0])
else:
xlim = 5000
xlim = [0, xlim]
ylim = request_dict.get('ylim', None)
if ylim:
ylim = int(ylim[0])
else:
ylim = 10
ylim = [0, ylim]
labels = request_dict.get('labels', None)
disp_types = request_dict.get('disp_type', None)
if not disp_types:
disp_types = ['median']
fig = stats.matplotlibplot_file(labels, xlim=xlim, ylim=ylim, disp_types=disp_types)
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def display_full_page(request):
"""
Handles request to display the full page
"""
all_labels = stats.get_all_labels()
label_list = get_label_list(all_labels)
html = render(request, 'charts.html')
content = html.content
content = content.format(label_list)
html.content = content
return html
def get_label_list(all_labels):
"""
Returns list of html form inputs corresponding to the different
labels in the provided db file
"""
label_list = ''
for label in all_labels:
label_list += '<b>%s</b>:<input type="checkbox" name="labels" value="%s">' % (label, label)
return label_list
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
    Make a 2D grid from two 1-D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapeziodal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
          matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
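# A minimal usage sketch (illustrative helper, not part of the original
# module): estimate the PSD of a noisy 10 Hz sine sampled at Fs = 1000 Hz;
# the strongest bin should land near 10 Hz.
def _example_psd_usage():
    t = np.arange(0, 10, 1.0 / 1000)
    x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=1024, Fs=1000, noverlap=512)
    return freqs[np.argmax(Pxx)]   # expected to be close to 10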
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segements and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2]))
Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
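# A short sketch of specgram() (names here are illustrative, not part of the
# module API): a 50 Hz tone sampled at 1 kHz split into 256-sample segments
# with the default overlap of 128 yields one periodogram column per segment.
def _specgram_example():
    t = np.arange(0.0, 2.0, 0.001)                  # 2 s at 1 kHz
    x = np.sin(2 * np.pi * 50.0 * t)
    Pxx, freqs, bins = specgram(x, NFFT=256, Fs=1000)
    return Pxx.shape, freqs.shape, bins.shape       # (nfreqs, nseg), (nfreqs,), (nseg,)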
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
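# Sketch of cohere() (illustrative helper, not part of the module API): two
# noisy signals sharing a common 5 Hz component should show coherence close
# to 1 near that frequency.  len(x) must be at least 2*NFFT, hence the 2048
# samples used below.
def _cohere_example():
    n = 2048
    t = np.arange(n) / 100.0                        # Fs = 100 Hz
    common = np.sin(2 * np.pi * 5.0 * t)
    x = common + 0.5 * np.random.randn(n)
    y = common + 0.5 * np.random.randn(n)
    Cxy, f = cohere(x, y, NFFT=256, Fs=100)
    return Cxy, f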
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
p2*x0^2 + p1*x0 + p0 = y1
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
      p = (X_t X)^{-1} X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page use a different convention from the one
    here.  The linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
      the coherence or phase vectors for any (*i*, *j*) key.  Eg,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7 GHz Athlon with 512 MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
    cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), typecode(X)))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = norm(windowVals)**2
    for icol_count, iCol in enumerate(allColumns):
        progressCallback(icol_count / float(Ncols), 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(absolute(Slices)**2), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
    .. math::
        -\sum p_i \ln(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
S = -1.0*np.sum(p*log(p)) + log(delta)
#S = -1.0*np.sum(p*log(p))
return S
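# Sketch comparing entropy() with the analytic differential entropy of a
# Gaussian, 0.5*(1 + log(2*pi*sigma**2)), as suggested in the docstring
# above.  The helper name and the chosen parameters are illustrative only.
def _entropy_example(mu=0.0, sigma=2.0, nsamples=200000, nbins=100):
    y = mu + sigma * np.random.randn(nsamples)
    S = entropy(y, nbins)
    S_analytic = 0.5 * (1.0 + np.log(2 * np.pi * sigma ** 2))
    return S, S_analytic                            # the two should be close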
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y) \\cdot \\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
f = 1/(N*dx)*np.arange(-N/2, N/2, np.float_)
ind = np.concatenate([np.arange(N/2, N, int),
np.arange(0, N/2, int)])
df = f[1]-f[0]
cfl = exp(-gamma*np.absolute(2*pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
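# Sketch of prepca() (illustrative helper): two highly correlated variables
# observed 500 times; the first principal component should account for
# nearly all of the variance.
def _prepca_example():
    obs = np.random.randn(500)
    P = np.vstack([obs, obs + 0.1 * np.random.randn(500)])   # (numVars, numObs)
    Pcomponents, Trans, fracVar = prepca(P)
    return fracVar                                  # first entry close to 1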
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
def prctile_rank(x, p):
"""
    Return the rank for each element in *x*; possible ranks run from
    0 to len(*p*).  Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
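# Sketch of prctile() and prctile_rank() (illustrative helper): the sample
# quartiles of a uniform draw and the quartile bin each value falls into.
def _prctile_example():
    x = np.random.rand(1000)
    quartiles = prctile(x, (25.0, 50.0, 75.0))      # roughly 0.25, 0.5, 0.75
    ranks = prctile_rank(x, 4)                      # integers 0..3, one per value
    return quartiles, ranks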
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
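# A runnable version of the 1D example from the rk4() docstring above:
# integrate dy/dt = -alpha*y + exp(-t) from y(0) = 1 over two seconds.
# The helper name is illustrative only.
def _rk4_example():
    alpha = 2.0
    def derivs(y, t):
        return -alpha * y + np.exp(-t)
    t = np.arange(0.0, 2.0, 0.0005)
    yout = rk4(derivs, 1.0, t)
    return t, yout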
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
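# Sketch of dist() and dist_point_to_segment() (illustrative helper): the
# point (0, 1) lies exactly one unit above the segment from (-1, 0) to (1, 0).
def _dist_example():
    d0 = dist(np.array([0.0, 0.0]), np.array([3.0, 4.0]))            # 5.0
    d1 = dist_point_to_segment((0.0, 1.0), (-1.0, 0.0), (1.0, 0.0))  # 1.0
    return d0, d1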
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
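# Sketch of segments_intersect() (illustrative helper): the two diagonals of
# the unit square cross, while two parallel horizontal segments do not.
def _segments_intersect_example():
    crossing = segments_intersect(((0.0, 0.0), (1.0, 1.0)),
                                  ((0.0, 1.0), (1.0, 0.0)))   # True
    parallel = segments_intersect(((0.0, 0.0), (1.0, 0.0)),
                                  ((0.0, 1.0), (1.0, 1.0)))   # False
    return crossing, parallel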
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
    .. math::
        \\lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
        Update the *datalim* to the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
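# Sketch of movavg() (illustrative helper): a 3-point moving average of the
# integers 0..9; the 'valid' convolution returns len(x) - n + 1 points.
def _movavg_example():
    x = np.arange(10.0)
    return movavg(x, 3)                             # array([1., 2., ..., 8.])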
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    SLOPES calculates the slope y'(x).  Given data vectors X and Y, SLOPES
    calculates Y'(X), i.e. the slope of a curve Y(X).  The slope is
    estimated using the slope of a parabola through any three consecutive
    points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were "not an academic journal but once in a while something
    serious and original comes in", adding that this was "apparently a
    real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation.  The relevance of the data obtained from this is, of
    course, questionable...
    original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
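# A runnable version of the example from the stineman_interp() docstring
# above: interpolate a coarse sine curve onto a finer grid using the known
# analytic slopes (pass yp=None to let slopes() estimate them instead).
def _stineman_example():
    x = np.linspace(0, 2 * np.pi, 20)
    y = np.sin(x)
    yp = np.cos(x)
    xi = np.linspace(0, 2 * np.pi, 40)
    yi = stineman_interp(xi, x, y, yp)
    return xi, yi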
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
    iterable.  If they are iterable, they must be equal in length to x.
    The return value is x, y arrays for use with Axes.fill.
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
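# Sketch of poly_between() (illustrative helper): build the closed polygon
# bounded below by y = 0 and above by a parabola, suitable for Axes.fill.
def _poly_between_example():
    x = np.linspace(-1.0, 1.0, 50)
    xv, yv = poly_between(x, 0.0, 1.0 - x ** 2)
    return xv, yv                                   # pass to ax.fill(xv, yv)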
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return 0
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
    This is a _slow_ function, but it is guaranteed to return the correct
    integer value if the input is an exact integer power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
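# Sketch of the integer-representation helpers above (illustrative only, and
# Python 2 specific like the rest of this module): base_repr() for arbitrary
# bases, binary_repr() for base 2, and log2()/ispower2() built on top of it.
def _base_repr_example():
    return (base_repr(255, 16),     # 'FF'
            binary_repr(10),        # '1010'
            ispower2(16),           # 4
            log2(16))               # 4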
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has a one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name
    in the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
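# Sketch of rec_groupby() (illustrative helper): count and average the sales
# per product code in a tiny record array built with np.rec.fromrecords.
def _rec_groupby_example():
    r = np.rec.fromrecords([('A', 10.0), ('B', 30.0), ('A', 20.0)],
                           names='productcode,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    return rec_groupby(r, ('productcode',), stats)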
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
assert dt2==dt1
if dt1.num>dt2.num:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
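# Hedged usage sketch (editor addition, not part of the original module): map a
# tiny record array to Format objects and apply the CSV-storage variant. The
# field names 'a'/'b' and the _example_* variables are illustrative only.
_example_r = np.rec.fromrecords([(1, 2.5), (2, 3.25)], names='a,b')
_example_formatd = get_formatd(_example_r)                 # one Format* per field
_example_csvfmt = csvformat_factory(_example_formatd['b'])
assert _example_csvfmt.tostr(2.5) == '2.5'                 # repr-style, unscaled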
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
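# Hedged usage sketch (editor addition): rec2txt on a two-row record array; the
# 'id'/'price' field names are illustrative only.
_txt_demo_r = np.rec.fromrecords([('ABC', 12.54), ('XYZ', 6.32)], names='id,price')
_txt_demo = rec2txt(_txt_demo_r, precision=2)   # padded, justified text table
assert isinstance(_txt_demo, str)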
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
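# Hedged usage sketch (editor addition): write a record array to an in-memory
# buffer instead of a real file; cStringIO matches this module's Python 2 vintage.
from cStringIO import StringIO as _DemoStringIO
_csv_buf = _DemoStringIO()
rec2csv(np.rec.fromrecords([(1, 2.5), (2, 3.25)], names='a,b'), _csv_buf)
# _csv_buf.getvalue() now holds the header row plus two data rows of CSV text.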
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside the convex
hull defined by the input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
(:mod:`mpl_toolkits.natgrid`) has been created that provides a more
robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
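# Hedged usage sketch (editor addition): interpolate scattered samples onto a
# regular, monotonically increasing grid. Needs the delaunay (or natgrid)
# backend at runtime; all _g* names are illustrative only.
_gx = np.random.uniform(-2, 2, 30)
_gy = np.random.uniform(-2, 2, 30)
_gz = _gx * np.exp(-_gx ** 2 - _gy ** 2)
_gxi = np.linspace(-2, 2, 50)
_gyi = np.linspace(-2, 2, 50)
_gzi = griddata(_gx, _gy, _gz, _gxi, _gyi)   # masked outside the convex hull
assert _gzi.shape == (50, 50)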
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
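# Hedged usage sketch (editor addition): interpolate a few arbitrary points on a
# straight line, including one beyond the data range with extrap=True.
_lx = [0.0, 1.0, 2.0, 3.0]
_ly = [0.0, 10.0, 20.0, 30.0]
_lyi = less_simple_linear_interpolation(_lx, _ly, [0.5, 2.25, 5.0], extrap=True)
assert np.allclose(_lyi, [5.0, 22.5, 30.0])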
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
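# Hedged usage sketch (editor addition): for y = x**2 the parabola-based estimate
# reproduces dy/dx = 2*x exactly (endpoints included, by construction).
_sx = np.linspace(0.0, 4.0, 9)
_syp = slopes(_sx, _sx ** 2)
assert np.allclose(_syp, 2.0 * _sx)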
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
"not an academic journal but once in a while something serious
and original comes in", adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
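# Hedged usage sketch (editor addition): the coarse-sine example from the
# docstring, minus the plotting calls.
_tx = np.linspace(0, 2 * np.pi, 20)
_txi = np.linspace(0, 2 * np.pi, 40)
_tyi = stineman_interp(_txi, _tx, np.sin(_tx), yp=np.cos(_tx))
assert _tyi.shape == _txi.shape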
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
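# Hedged usage sketch (editor addition): polygon filling the area between a curve
# and the constant lower bound 0; xv/yv are what ax.fill() would receive.
_px = np.linspace(0.0, 1.0, 5)
_pxv, _pyv = poly_between(_px, 0, _px ** 2)
assert len(_pxv) == 2 * len(_px) and len(_pyv) == 2 * len(_px)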
def is_closed_polygon(X):
"""
Tests whether the first and last objects in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
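# Hedged usage sketch (editor addition): two runs of True in a boolean mask.
_demo_mask = [False, True, True, False, False, True, True, True]
assert contiguous_regions(_demo_mask) == [(1, 3), (5, 8)]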
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
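# Hedged usage sketch (editor addition): cumulative distance along three sides of
# the unit square.
_pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
assert np.allclose(path_length(_pts), [0.0, 1.0, 2.0, 3.0])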
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
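# Hedged usage sketch (editor addition): the cubic approximation keeps the
# quadratic curve's end points; the control-point values here are arbitrary.
_cubic = quad2cubic(0.0, 0.0, 1.0, 2.0, 2.0, 0.0)
assert _cubic[0:2] == (0.0, 0.0) and _cubic[-2:] == (2.0, 0.0)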
| agpl-3.0 |
david-ragazzi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip a matrix left/right (reverse the column order)
flipud - flip a matrix up/down (reverse the row order)
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| gpl-3.0 |
lenovor/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
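# Hedged usage sketch (editor addition): the JL helper returns the minimum safe
# embedding dimension for a sample count and distortion eps; 5920 matches the
# figure quoted in the error-message test further below.
assert johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1) == 5920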
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
wavelets/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
# Converts a spreadsheet column letter to a 1-based index; multi-letter
# columns (e.g. 'AA') are handled by the base-26 accumulation below.
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
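# Hedged usage sketch (editor addition): single- and double-letter columns map to
# 1-based indices ('A' -> 1, 'Z' -> 26, 'AA' -> 27), matching the 'AA'..'BU'
# coordinates used in the answer key below.
assert col_letter_to_index('A') == 1
assert col_letter_to_index('Z') == 26
assert col_letter_to_index('AA') == 27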
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as such:
- If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
--- If the spreadsheet's checksum does not match the latest, then grab
the latest checksum and replace the local checksum file.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
It is possible that md5's could collide; if that is ever the case, we should
then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
The python-excel libraries use 0 index, while the spreadsheet in a GUI
uses a 1 index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
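# Hedged usage sketch (editor addition): DataIndex converts the 1-indexed GUI
# coordinates into the 0-indexed values handed to xlrd.
_di_demo = DataIndex('Sim Period', 'D', 4, 255)
assert (_di_demo.col_index, _di_demo.row_start_index, _di_demo.row_end_index) == (3, 3, 254)
assert str(_di_demo) == "'Sim Period'!D4:D255"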
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
madcowswe/ODrive | analysis/cogging_torque/cogging_harmonics.py | 2 | 1304 |
import numpy as np
import matplotlib.pyplot as plt
encoder_cpr = 2400
stator_slots = 12
pole_pairs = 7
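# Editor note (assumption): `data` is expected to be a 1-D numpy array of the
# measured cogging current per encoder count (encoder_cpr samples), produced in a
# preceding interactive cell or script. A hypothetical way to provide it:
# data = np.load('cogging_map.npy')  # illustrative path only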
N = data.size
fft = np.fft.rfft(data)
freq = np.fft.rfftfreq(N, d=1./encoder_cpr)
harmonics = [0]
harmonics += [(i+1)*stator_slots for i in range(pole_pairs)]
harmonics += [pole_pairs]
harmonics += [(i+1)*2*pole_pairs for i in range(int(stator_slots/4))]
fft_sparse = fft.copy()
indicies = np.arange(fft_sparse.size)
mask = [i not in harmonics for i in indicies]
fft_sparse[mask] = 0.0
interp_data = np.fft.irfft(fft_sparse)
#%%
#plt.figure()
plt.subplot(3, 1, 1)
plt.plot(data, label='raw')
plt.plot(interp_data, label='selected harmonics IFFT')
plt.title('cogging map')
plt.xlabel('counts')
plt.ylabel('A')
plt.legend(loc='best')
#plt.figure()
plt.subplot(3, 1, 2)
plt.stem(freq, np.abs(fft)/N, label='raw')
plt.stem(freq[harmonics], np.abs(fft_sparse[harmonics])/N, markerfmt='ro', label='selected harmonics')
plt.title('cogging map spectrum')
plt.xlabel('cycles/turn')
plt.ylabel('A')
plt.legend(loc='best')
#plt.figure()
plt.subplot(3, 1, 3)
plt.stem(freq, np.abs(fft)/N, label='raw')
plt.stem(freq[harmonics], np.abs(fft_sparse[harmonics])/N, markerfmt='ro', label='selected harmonics')
plt.title('cogging map spectrum')
plt.xlabel('cycles/turn')
plt.ylabel('A')
plt.legend(loc='best') | mit |
nikitasingh981/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 74 | 8472 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
schae234/gingivere | tests/motor_insert.py | 2 | 1470 | from __future__ import print_function
from collections import defaultdict
import copy
import pandas as pd
import tornado.ioloop
from tornado import gen
import load_raw_data
import motor
from tests import shelve_api
def insert_patient(patient):
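    # Walk the patient's training .mat files and yield one MongoDB document
    # per channel: the bulky 'data'/'channels' arrays are stripped from the
    # copied record and each yielded item is tagged with its channel name and
    # a running _id.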
count = 0
for data in load_raw_data.walk_training_mats(patient):
insert_item = copy.deepcopy(data)
channels = insert_item['channels']
del insert_item['data']
del insert_item['channels']
for i, item in enumerate(data['data']):
print(channels)
insert_item['channel'] = channels[i]
insert_item['_id'] = count
count += 1
yield insert_item
def shelve(result, error):
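    # Motor insert callback: on success, collect the metadata of each stored
    # document into the module-level dict `d` (created in bulk_write) so it
    # can be written to the shelve store once the bulk insert finishes.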
if error:
print('error getting user!', error)
else:
name = "%02d_%s" % (i, result['file'])
d['name'].append(name)
d['_id'].append(result['_id'])
d['state'].append(result['state'])
d['channel'].append(result['channel'])
print("Just posted: " + name)
@gen.coroutine
def bulk_write():
global d
d = defaultdict(list)
collection.insert((i for i in insert_patient('Dog_2')), callback=shelve)
if __name__ == "__main__":
client = motor.MotorClient()
db = motor.MotorDatabase(client, 'gingivere')
collection = motor.MotorCollection(db, 'Dog_1')
tornado.ioloop.IOLoop.current().run_sync(bulk_write)
df = pd.DataFrame(d)
shelve_api.insert(df, 'test_dog_1')
| mit |
architecture-building-systems/CEAforArcGIS | cea/technologies/network_layout/steiner_spanning_tree.py | 1 | 18985 | """
This script calculates the Steiner spanning tree of a shapefile network
"""
import math
import os
import networkx as nx
import pandas as pd
from geopandas import GeoDataFrame as gdf
from networkx.algorithms.approximation.steinertree import steiner_tree
from shapely.geometry import LineString
from typing import List
import cea.config
import cea.inputlocator
from cea.constants import SHAPEFILE_TOLERANCE
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def calc_steiner_spanning_tree(crs_projected,
temp_path_potential_network_shp,
output_network_folder,
temp_path_building_centroids_shp,
path_output_edges_shp,
path_output_nodes_shp,
weight_field,
type_mat_default,
pipe_diameter_default,
type_network,
total_demand_location,
create_plant,
allow_looped_networks,
optimization_flag,
plant_building_names,
disconnected_building_names):
"""
    Calculate the Steiner spanning tree of the network. Note that this function can't be run in parallel in its
    present form.
:param str crs_projected: e.g. "+proj=utm +zone=48N +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
:param str temp_path_potential_network_shp: e.g. "TEMP/potential_network.shp"
:param str output_network_folder: "{general:scenario}/inputs/networks/DC"
:param str temp_path_building_centroids_shp: e.g. "%TEMP%/nodes_buildings.shp"
:param str path_output_edges_shp: "{general:scenario}/inputs/networks/DC/edges.shp"
:param str path_output_nodes_shp: "{general:scenario}/inputs/networks/DC/nodes.shp"
:param str weight_field: e.g. "Shape_Leng"
:param str type_mat_default: e.g. "T1"
:param float pipe_diameter_default: e.g. 150
:param str type_network: "DC" or "DH"
:param str total_demand_location: "{general:scenario}/outputs/data/demand/Total_demand.csv"
:param bool create_plant: e.g. True
:param bool allow_looped_networks:
:param bool optimization_flag:
:param List[str] plant_building_names: e.g. ``['B001']``
:param List[str] disconnected_building_names: e.g. ``['B002', 'B010', 'B004', 'B005', 'B009']``
:return: ``(mst_edges, mst_nodes)``
"""
# read shapefile into networkx format into a directed potential_network_graph, this is the potential network
potential_network_graph = nx.read_shp(temp_path_potential_network_shp)
building_nodes_graph = nx.read_shp(temp_path_building_centroids_shp)
# transform to an undirected potential_network_graph
iterator_edges = potential_network_graph.edges(data=True)
G = nx.Graph()
for (x, y, data) in iterator_edges:
x = (round(x[0], SHAPEFILE_TOLERANCE), round(x[1], SHAPEFILE_TOLERANCE))
y = (round(y[0], SHAPEFILE_TOLERANCE), round(y[1], SHAPEFILE_TOLERANCE))
G.add_edge(x, y, weight=data[weight_field])
# get the building nodes and coordinates
iterator_nodes = building_nodes_graph.nodes(data=True)
terminal_nodes_coordinates = []
terminal_nodes_names = []
for coordinates, data in iterator_nodes._nodes.items():
building_name = data['Name']
if building_name in disconnected_building_names:
print("Building {} is considered to be disconnected and it is not included".format(building_name))
else:
terminal_nodes_coordinates.append(
(round(coordinates[0], SHAPEFILE_TOLERANCE), round(coordinates[1], SHAPEFILE_TOLERANCE)))
terminal_nodes_names.append(data['Name'])
# calculate steiner spanning tree of undirected potential_network_graph
try:
mst_non_directed = nx.Graph(steiner_tree(G, terminal_nodes_coordinates))
nx.write_shp(mst_non_directed, output_network_folder) # need to write to disk and then import again
mst_nodes = gdf.from_file(path_output_nodes_shp)
mst_edges = gdf.from_file(path_output_edges_shp)
except:
raise ValueError('There was an error while creating the Steiner tree. '
'Check the streets.shp for isolated/disconnected streets (lines) and erase them, '
'the Steiner tree does not support disconnected graphs. '
'If no disconnected streets can be found, try increasing the SHAPEFILE_TOLERANCE in cea.constants and run again. '
'Otherwise, try using the Feature to Line tool of ArcMap with a tolerance of around 10m to solve the issue.')
# POPULATE FIELDS IN NODES
pointer_coordinates_building_names = dict(zip(terminal_nodes_coordinates, terminal_nodes_names))
def populate_fields(coordinate):
if coordinate in terminal_nodes_coordinates:
return pointer_coordinates_building_names[coordinate]
else:
return "NONE"
mst_nodes['coordinates'] = mst_nodes['geometry'].apply(
lambda x: (round(x.coords[0][0], SHAPEFILE_TOLERANCE), round(x.coords[0][1], SHAPEFILE_TOLERANCE)))
mst_nodes['Building'] = mst_nodes['coordinates'].apply(lambda x: populate_fields(x))
mst_nodes['Name'] = mst_nodes['FID'].apply(lambda x: "NODE" + str(x))
mst_nodes['Type'] = mst_nodes['Building'].apply(lambda x: 'CONSUMER' if x != "NONE" else "NONE")
    # do some checks to see that the building names were not compromised
if len(terminal_nodes_names) != (len(mst_nodes['Building'].unique()) - 1):
raise ValueError('There was an error while populating the nodes fields. '
'One or more buildings could not be matched to nodes of the network. '
'Try changing the constant SNAP_TOLERANCE in cea/constants.py to try to fix this')
# POPULATE FIELDS IN EDGES
mst_edges.loc[:, 'Type_mat'] = type_mat_default
mst_edges.loc[:, 'Pipe_DN'] = pipe_diameter_default
mst_edges.loc[:, 'Name'] = ["PIPE" + str(x) for x in mst_edges.index]
if allow_looped_networks:
# add loops to the network by connecting None nodes that exist in the potential network
mst_edges, mst_nodes = add_loops_to_network(G,
mst_non_directed,
mst_nodes,
mst_edges,
type_mat_default,
pipe_diameter_default)
# mst_edges.drop(['weight'], inplace=True, axis=1)
if create_plant:
        if not optimization_flag:
building_anchor = calc_coord_anchor(total_demand_location, mst_nodes, type_network)
mst_nodes, mst_edges = add_plant_close_to_anchor(building_anchor, mst_nodes, mst_edges,
type_mat_default, pipe_diameter_default)
else:
for building in plant_building_names:
building_anchor = building_node_from_name(building, mst_nodes)
mst_nodes, mst_edges = add_plant_close_to_anchor(building_anchor, mst_nodes, mst_edges,
type_mat_default, pipe_diameter_default)
# GET COORDINATE AND SAVE FINAL VERSION TO DISK
mst_edges.crs = crs_projected
mst_nodes.crs = crs_projected
mst_edges['length_m'] = mst_edges['weight']
mst_edges[['geometry','length_m', 'Type_mat', 'Name', 'Pipe_DN']].to_file(path_output_edges_shp, driver='ESRI Shapefile')
mst_nodes[['geometry', 'Building', 'Name', 'Type']].to_file(path_output_nodes_shp, driver='ESRI Shapefile')
def add_loops_to_network(G, mst_non_directed, new_mst_nodes, mst_edges, type_mat, pipe_dn):
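    # Re-introduce edges of the potential street network that the Steiner tree
    # dropped between NONE-type (street) nodes, so that the layout contains
    # loops: first try street nodes that are directly adjacent in the
    # potential network, then pairs that are two edges apart (adding the
    # intermediate street node to the tree when needed).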
added_a_loop = False
# Identify all NONE type nodes in the steiner tree
for node_number, node_coords in zip(new_mst_nodes.index, new_mst_nodes['coordinates']):
if new_mst_nodes['Type'][node_number] == 'NONE':
# find neighbours of nodes in the potential network and steiner network
potential_neighbours = G[node_coords]
steiner_neighbours = mst_non_directed[node_coords]
# check if there are differences, if yes, an edge was deleted here
if not set(potential_neighbours.keys()) == set(steiner_neighbours.keys()):
new_neighbour_list = []
for a in potential_neighbours.keys():
if a not in steiner_neighbours.keys():
new_neighbour_list.append(a)
# check if the node that is additional in the potential network also exists in the steiner network
for new_neighbour in new_neighbour_list:
if new_neighbour in list(new_mst_nodes['coordinates'].values):
# check if it is a none type
# write out index of this node
node_index = list(new_mst_nodes['coordinates'].values).index(new_neighbour)
if new_mst_nodes['Type'][node_index] == 'NONE':
# create new edge
line = LineString((node_coords, new_neighbour))
if not line in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
added_a_loop = True
mst_edges.reset_index(inplace=True, drop=True)
if not added_a_loop:
print('No first degree loop added. Trying two nodes apart.')
# Identify all NONE type nodes in the steiner tree
for node_number, node_coords in zip(new_mst_nodes.index, new_mst_nodes['coordinates']):
if new_mst_nodes['Type'][node_number] == 'NONE':
# find neighbours of nodes in the potential network and steiner network
potential_neighbours = G[node_coords]
steiner_neighbours = mst_non_directed[node_coords]
# check if there are differences, if yes, an edge was deleted here
if not set(potential_neighbours.keys()) == set(steiner_neighbours.keys()):
new_neighbour_list = []
for a in potential_neighbours.keys():
if a not in steiner_neighbours.keys():
new_neighbour_list.append(a)
# check if the node that is additional in the potential network does not exist in the steiner network
for new_neighbour in new_neighbour_list:
if new_neighbour not in list(new_mst_nodes['coordinates'].values):
# find neighbours of that node
second_degree_pot_neigh = list(G[new_neighbour].keys())
for potential_second_deg_neighbour in second_degree_pot_neigh:
if potential_second_deg_neighbour in list(new_mst_nodes[
'coordinates'].values) and potential_second_deg_neighbour != node_coords:
# check if it is a none type
# write out index of this node
node_index = list(new_mst_nodes['coordinates'].values).index(
potential_second_deg_neighbour)
if new_mst_nodes['Type'][node_index] == 'NONE':
# create new edge
line = LineString((node_coords, new_neighbour))
if line not in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
# Add new node from potential network to steiner tree
# create copy of selected node and add to list of all nodes
copy_of_new_mst_nodes = new_mst_nodes.copy()
x_distance = new_neighbour[0] - node_coords[0]
y_distance = new_neighbour[1] - node_coords[1]
copy_of_new_mst_nodes.geometry = copy_of_new_mst_nodes.translate(
xoff=x_distance, yoff=y_distance)
selected_node = copy_of_new_mst_nodes[
copy_of_new_mst_nodes["coordinates"] == node_coords]
selected_node.loc[:, "Name"] = "NODE" + str(new_mst_nodes.Name.count())
selected_node.loc[:, "Type"] = "NONE"
selected_node["coordinates"] = selected_node.geometry.values[0].coords
if selected_node["coordinates"].values not in new_mst_nodes[
"coordinates"].values:
new_mst_nodes = new_mst_nodes.append(selected_node)
new_mst_nodes.reset_index(inplace=True, drop=True)
line2 = LineString((new_neighbour, potential_second_deg_neighbour))
if line2 not in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line2, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
added_a_loop = True
mst_edges.reset_index(inplace=True, drop=True)
if not added_a_loop:
print('No loops added.')
return mst_edges, new_mst_nodes
def calc_coord_anchor(total_demand_location, nodes_df, type_network):
total_demand = pd.read_csv(total_demand_location)
nodes_names_demand = nodes_df.merge(total_demand, left_on="Building", right_on="Name", how="inner")
if type_network == "DH":
field = "QH_sys_MWhyr"
elif type_network == "DC":
field = "QC_sys_MWhyr"
else:
raise ValueError("Invalid value for variable 'type_network': {type_network}".format(type_network=type_network))
max_value = nodes_names_demand[field].max()
building_series = nodes_names_demand[nodes_names_demand[field] == max_value]
return building_series
def building_node_from_name(building_name, nodes_df):
building_series = nodes_df[nodes_df['Building'] == building_name]
return building_series
def add_plant_close_to_anchor(building_anchor, new_mst_nodes, mst_edges, type_mat, pipe_dn):
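    # Duplicate the street (NONE) node closest to the anchor building, offset
    # the copy by one unit in x and y, mark it as a PLANT node and connect it
    # back to the original node with a new pipe edge.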
# find closest node
copy_of_new_mst_nodes = new_mst_nodes.copy()
building_coordinates = building_anchor.geometry.values[0].coords
x1 = building_coordinates[0][0]
y1 = building_coordinates[0][1]
    delta = float("inf")  # start with an unbounded best distance
for node in copy_of_new_mst_nodes.iterrows():
if node[1]['Type'] == 'NONE':
x2 = node[1].geometry.coords[0][0]
y2 = node[1].geometry.coords[0][1]
distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
if 0 < distance < delta:
delta = distance
node_id = node[1].Name
pd.options.mode.chained_assignment = None # avoid warning
# create copy of selected node and add to list of all nodes
copy_of_new_mst_nodes.geometry = copy_of_new_mst_nodes.translate(xoff=1, yoff=1)
selected_node = copy_of_new_mst_nodes[copy_of_new_mst_nodes["Name"] == node_id]
selected_node.loc[:, "Name"] = "NODE" + str(new_mst_nodes.Name.count())
selected_node.loc[:, "Type"] = "PLANT"
new_mst_nodes = new_mst_nodes.append(selected_node)
new_mst_nodes.reset_index(inplace=True, drop=True)
# create new edge
point1 = (selected_node.geometry.x, selected_node.geometry.y)
point2 = (new_mst_nodes[new_mst_nodes["Name"] == node_id].geometry.x,
new_mst_nodes[new_mst_nodes["Name"] == node_id].geometry.y)
line = LineString((point1, point2))
mst_edges = mst_edges.append({"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())
}, ignore_index=True)
mst_edges.reset_index(inplace=True, drop=True)
return new_mst_nodes, mst_edges
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
weight_field = 'Shape_Leng'
type_mat_default = config.network_layout.type_mat
pipe_diameter_default = config.network_layout.pipe_diameter
type_network = config.network_layout.network_type
create_plant = config.network_layout.create_plant
output_substations_shp = locator.get_temporary_file("nodes_buildings.shp")
path_potential_network = locator.get_temporary_file("potential_network.shp") # shapefile, location of output.
output_edges = locator.get_network_layout_edges_shapefile(type_network, '')
output_nodes = locator.get_network_layout_nodes_shapefile(type_network, '')
output_network_folder = locator.get_input_network_folder(type_network, '')
total_demand_location = locator.get_total_demand()
calc_steiner_spanning_tree(path_potential_network, output_network_folder, output_substations_shp, output_edges,
output_nodes, weight_field, type_mat_default, pipe_diameter_default, type_network,
total_demand_location, create_plant)
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
gclenaghan/scikit-learn | sklearn/ensemble/tests/test_forest.py | 3 | 41612 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
    # XXX: Remove this test in 0.19 after transform support for estimators
    # is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
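    # For reference, the theoretical importance evaluated exactly by
    # mdi_importance() below is (sketched here in comment form)
    #
    #   Imp(X_m) = sum_{k=0}^{p-1}  1 / (C(p, k) * (p - k))
    #              * sum_{B subset of V \ {X_m}, |B| = k}  I(X_m ; Y | B)
    #
    # i.e. a weighted sum, over every conditioning subset B of the other
    # variables, of the conditional mutual information between X_m and the
    # output Y.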
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
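    # (Background for the tolerance used here: each bootstrap resample leaves
    # out roughly (1 - 1/n)**n ~ 1/e ~ 36.8% of the rows, and those out-of-bag
    # rows act as an internal validation set, so oob_score_ should track the
    # held-out score without matching it exactly.)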
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
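    # (Sketch of the counting: with a single feature taking the values
    # {0, 1, 2, 3}, the totally random split draws its threshold roughly
    # uniformly in (0, 3). A root threshold in (1, 2) -- probability 1/3 --
    # separates {0, 1} from {2, 3}, and each side then needs exactly one more,
    # shape-wise deterministic, split: that is the compact tree. A root
    # threshold in (0, 1) or in (2, 3) -- probability 1/3 each -- leaves a
    # three-value side whose own threshold lands in either of two unit
    # intervals with probability 1/2, giving 2 x 2 trees of probability 1/6.)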
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
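    # The warm-start pattern exercised below is the usual incremental one
    # (illustrative summary, not executed here):
    #   clf = ForestEstimator(n_estimators=5, warm_start=True)
    #   clf.fit(X, y)                    # grows 5 trees
    #   clf.set_params(n_estimators=10)
    #   clf.fit(X, y)                    # grows 5 more, keeping the first 5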
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
    # Assert that leaf indices are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
decvalts/landlab | landlab/__init__.py | 1 | 1460 | #! /usr/bin/env python
"""
The Landlab
:Package name: TheLandlab
:Version: 0.1.0
:Release date: 2013-03-24
:Authors:
Greg Tucker,
Nicole Gasparini,
Erkan Istanbulluoglu,
Daniel Hobley,
Sai Nudurupati,
Jordan Adams,
Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
__version__ = '0.1.27'
import os
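# Headless environments (no X display) cannot use an interactive matplotlib
# backend, so fall back to the non-interactive 'Agg' backend when matplotlib
# is available.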
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__ = ['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester']
| mit |
jmschrei/scikit-learn | sklearn/utils/tests/test_extmath.py | 7 | 21916 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
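    # (Why this works: randint(6) only yields values 0..5, so the value 6
    # appears exclusively in the five columns forced below; their weights are
    # raised to at least 1, giving 6 a per-row total of at least 5, while any
    # competing value is spread over the remaining columns with weights below
    # 1 each, i.e. a total below 5. The winning score is therefore exactly
    # w[:, :5].sum(1).)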
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
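    # The identity being exercised (a brief sketch): with a = max_i x_i,
    #   log(sum_i exp(x_i)) = a + log(sum_i exp(x_i - a)),
    # so the shifted exponentials stay in a well-represented range regardless
    # of how small (or large) the raw inputs are.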
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer)
# the iterated power method is helping to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalizer='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(300, 1000, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
print(error_2 - error_20)
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
print(error_2 - error)
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
mattdelhey/rice-scrape | scrape/scrape_eval.py | 2 | 3448 | import dryscrape
import re
import sys
import os
import numpy as np
import pandas as pd
sys.path.append('scrape')
from helpers import login_to_evaluations
UserID = ''
PIN = ''
project_dir = '/Users/mdelhey/rice-scrape/'
YEAR_SCRAPE = '2013'
TERM_SCRAPE = 'Spring'
# Boilerplate
os.chdir(project_dir)
try: __file__
except NameError: __file__ = 'repl'
# Create pandas df
data_evals = pd.DataFrame(None, columns=['crn', 'term', 'course', 'xlist', 'enrolled', 'instructor',
'r_organization', 'r_assignments', 'r_quality', 'r_challenge',
'r_workload', 'r_satisfies', 'r_grade', 'r_pf',
'n_organization', 'n_assignments', 'n_quality', 'n_challenge',
'n_workload', 'n_satisfies', 'n_grade', 'n_pf',
'n_comments', 'comments'])
# set up a web scraping session
sess = dryscrape.Session(base_url = 'http://esther.rice.edu')
# OPTIONS: no images, pretend to be Chrome
sess.set_header('User-Agent', 'Chrome/36.0.1985.67')
sess.set_attribute('auto_load_images', False)
# Login & navigate to the right page
print '[%s] Visiting esther.rice.edu (Year: %s, Term: %s)' % (__file__, YEAR_SCRAPE, TERM_SCRAPE)
login_to_evaluations(UserID, PIN, sess, wait = 4)
sess.render('tmp.png')
# Loop through each subject/class
departs = []
classes = []
a = sess.xpath('//*[@id="crse_menu"]/td[2]/select/option')[1]
for d in sess.xpath('//*[@id="crse_menu"]/td[2]/select/option'):
#departs.append(d['text'])
departs.append(d.text())
d.select_option()
# Loop through each class
c = sess.xpath('//*[@id="crse_menu"]/td[4]/select')[0]
for c in sess.xpath('//*[@id="crse_menu"]/td[4]/select'):
classes.append(c.text())
# search (submit form)
search_link = sess.at_xpath('//*[@id="includeone"]/table/tbody/tr[5]/td[1]/input')
search_link.click()
# grab data: term, course, enrolled, instructors, etc.
row = { i: None for i in data_evals.columns }
row['term'] = sess.at_xpath( '//*[@id="%s_p"]/td[1]/table/tbody/tr[1]/td[2]' % row['crn']).text()
row['course'] = sess.at_xpath('//*[@id="%s_p"]/td[1]/table/tbody/tr[2]/td[2]' % row['crn']).text()
row['enrolled'] = sess.at_xpath('//*[@id="%s_p"]/td[1]/table/tbody/tr[3]/td[2]' % row['crn']).text()
row['instructor'] = sess.at_xpath('//*[@id="%s_p"]/td[1]/table/tbody/tr[4]/td[2]' % row['crn']).text()
row['r_organization'] = sess.at_xpath('//*[@id="chart_%s_1_means"]' % row['crn']).text()
row['n_organization'] = sess.at_xpath('//*[@id="chart_%s_1_response_total"]' % row['crn']).text()
# Loop through comments
num_comments_str = sess.at_xpath('//*[@id="20427"]/tbody/tr[6]/td/table/tbody/tr/td[3]').text()
num_comments = int(re.findall(r'[0-9]+', num_comments_str)[0])
comments = []
for cm in range(num_comments):
cm_path = '//*[@id="comment_%s_%s"]' % (row['crn'], str(cm + 1))
cm_time_path = '//*[@id="comment_time_%s_%s"]' % (row['crn'], str(cm + 1))
tup = (sess.at_xpath(cm_time_path).text(), sess.at_xpath(cm_path).text())
comments.append(tup)
# pick statistics
subj_link = sess.at_xpath('//*[@id="crse_menu"]/td[2]/select/option[80]')
subj_link.select_option()
# pick stat310
class_link = sess.at_xpath('//*[@id="crse_menu"]/td[4]/select/option[8]')
class_link.select_option()
sess.render('tmp.png')
| mit |
igabr/Metis_Projects_Chicago_2017 | 05-project-kojack/prophet_helper.py | 1 | 1164 | import pandas as pd
import numpy as np
from fbprophet import Prophet
from datetime import date
def prophet_forecast(row_of_df):
holidays = []
df = pd.DataFrame(row_of_df)
col_name = df.columns[0]
# print("Creating time series model for {}".format(col_name))
# print("Starting_date passed to prophet is {}.".format(df.index[0]))
# print("End date passed to prophet is {}".format(df.index[-1]))
for d in df.index:
val = d.weekday()
if val == 5 or val == 6:
holidays.append("Weekend")
else:
holidays.append("Weekday")
hol_df = pd.DataFrame(holidays, index=df.index, columns=["holiday"])
hol_df.reset_index(inplace=True)
hol_df.rename(columns={"date":"ds"}, inplace=True)
df.reset_index(inplace=True)
df.rename(columns={"date":"ds", df.columns[-1]:"y"}, inplace=True)
m = Prophet(holidays=hol_df, daily_seasonality=False, yearly_seasonality=False)
m.fit(df)
future = m.make_future_dataframe(periods=1) #predicting only 1 day into the future
forecast = m.predict(future)
y_pred = forecast.yhat[-1:].values[0]
print("The predicted value for {} on {} is {}".format(col_name, future.iloc[-1, :]['ds'].date(), y_pred))
return y_pred | mit |
eg-zhang/scikit-learn | examples/classification/plot_lda_qda.py | 78 | 5046 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
nicolas998/Op_Interpolated | 06_Codigos/viejos/Figuras_Qsim.py | 2 | 2786 | #!/usr/bin/env python
from wmf import wmf
import numpy as np
import pickle
import pandas as pnd
import pylab as pl
import argparse
import textwrap
import os
from multiprocessing import Pool
#-------------------------------------------------------------------
#LOCAL FUNCTIONS
#-------------------------------------------------------------------
def Multiprocess_Warper(Lista):
return cu.run_shia(Lista[0],Lista[1],Lista[2],Lista[3])
def ReadQsimPickle(ruta,nodo):
f = open(ruta,'rb')
Q = pickle.load(f)
S = pickle.load(f)
f.close()
return Q[nodo].values,S
#-------------------------------------------------------------------
#PARSEADOR DE ARGUMENTOS
#-------------------------------------------------------------------
#Parametros de entrada del trazador
parser=argparse.ArgumentParser(
prog='Figuras_Qsim',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Script to generate figures of the simulations performed by
the model. The script takes the list of simulated discharges and,
for each node, extracts the resulting discharge.
'''))
#Required parameters
parser.add_argument("dir_qsim", help="(Required) Directory with the simulated discharges")
parser.add_argument("nodo",help="(Required) Node for which the figure is generated",type = int)
parser.add_argument("ruta",help="(Required) Path where the figure is written")
parser.add_argument("-c","--observada",help="(Optional) ID of the observed station",type=int)
args=parser.parse_args()
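#Example invocation (sketch, not part of the original script; paths and node id are hypothetical):
#   python Figuras_Qsim.py /path/to/qsim_dir/ 102 /path/to/figure.png -c 2501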
#-------------------------------------------------------------------
#Lista los caudales simulados en la zona
#-------------------------------------------------------------------
#Lista los archivos de simulacion presentes
L = os.listdir(args.dir_qsim)
L = [l for l in L if l.endswith('qsim')]
#Try to read the discharges
nombres = []
CaudalesSim = []
for l in L:
#try:
Q,s = ReadQsimPickle(args.dir_qsim+l,args.nodo)
CaudalesSim.append(Q.tolist())
nombres.append(l[:-4])
#except:
# pass
CaudalesSim = np.array(CaudalesSim)
#-------------------------------------------------------------------
#Si hay una serie de caudales observada la lee
#-------------------------------------------------------------------
if args.observada:
print 'hola'
#-------------------------------------------------------------------
#Genera la figura para ese periodo
#-------------------------------------------------------------------
if args.observada:
wmf.plot_sim_single(CaudalesSim, mrain = s.values,
Dates = s.index.to_pydatetime(),
ruta = args.ruta,
Qo = CaudalObservado)
else:
wmf.plot_sim_single(CaudalesSim, mrain = s.values,
Dates = s.index.to_pydatetime(),
ruta = args.ruta,
legend = False)
| gpl-3.0 |
DGrady/pandas | pandas/core/computation/ops.py | 15 | 15900 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
# in Python 2, str() of a float
# can be truncated shorter than repr()
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
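# Illustrative note, not part of the original module: after the loop above,
# every operator token maps to its implementation, e.g.
#   _binary_ops_dict['=='] is op.eq
#   _binary_ops_dict['in'] is _in
#   _binary_ops_dict['+']  is op.add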
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should be cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
# do not upcast float32s to float64 unnecessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all='ignore'):
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError(
"\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
| bsd-3-clause |
cloudera/ibis | ibis/backends/pandas/execution/maps.py | 1 | 6331 | import collections
import functools
import pandas as pd
import toolz
import ibis.expr.operations as ops
from ..dispatch import execute_node
@execute_node.register(ops.MapLength, pd.Series)
def execute_map_length_series(op, data, **kwargs):
# TODO: investigate whether calling a lambda is faster
return data.dropna().map(len).reindex(data.index)
@execute_node.register(ops.MapLength, (collections.abc.Mapping, type(None)))
def execute_map_length_dict(op, data, **kwargs):
return None if data is None else len(data)
@execute_node.register(ops.MapValueForKey, pd.Series, pd.Series)
def execute_map_value_for_key_series_series(op, data, key, **kwargs):
assert data.size == key.size, 'data.size != key.size'
return data.map(
lambda x, keyiter=iter(key.values): x.get(next(keyiter), None)
)
@execute_node.register(ops.MapValueForKey, pd.Series, type(None))
def execute_map_value_for_key_series_none(op, data, key, **kwargs):
return pd.Series([None] * len(data))
@execute_node.register(ops.MapValueForKey, pd.Series, object)
def execute_map_value_for_key_series_scalar(op, data, key, **kwargs):
return data.map(functools.partial(safe_get, key=key))
@execute_node.register(ops.MapValueForKey, collections.abc.Mapping, pd.Series)
def execute_map_value_for_key_dict_series(op, data, key, **kwargs):
return key.map(functools.partial(safe_get, data))
@execute_node.register(ops.MapValueForKey, collections.abc.Mapping, object)
def execute_map_value_for_key_dict_scalar(op, data, key, **kwargs):
return safe_get(data, key)
@execute_node.register(ops.MapValueOrDefaultForKey, pd.Series, object, object)
def map_value_default_series_scalar_scalar(op, data, key, default, **kwargs):
return data.map(functools.partial(safe_get, key=key, default=default))
@execute_node.register(
ops.MapValueOrDefaultForKey, pd.Series, object, pd.Series
)
def map_value_default_series_scalar_series(op, data, key, default, **kwargs):
return data.map(
lambda mapping, key=key, defaultiter=iter(default.values): safe_get(
mapping, key, next(defaultiter)
)
)
@execute_node.register(
ops.MapValueOrDefaultForKey, pd.Series, pd.Series, object
)
def map_value_default_series_series_scalar(op, data, key, default, **kwargs):
return data.map(
lambda mapping, keyiter=iter(key.values), default=default: safe_get(
mapping, next(keyiter), default
)
)
@execute_node.register(
ops.MapValueOrDefaultForKey, pd.Series, pd.Series, pd.Series
)
def execute_map_value_default_series_series_series(op, data, key, default):
def get(
mapping, keyiter=iter(key.values), defaultiter=iter(default.values)
):
return safe_get(mapping, next(keyiter), next(defaultiter))
return data.map(get)
@execute_node.register(
ops.MapValueOrDefaultForKey, collections.abc.Mapping, object, object
)
def execute_map_value_default_dict_scalar_scalar(
op, data, key, default, **kwargs
):
return safe_get(data, key, default)
@execute_node.register(
ops.MapValueOrDefaultForKey, collections.abc.Mapping, object, pd.Series
)
def execute_map_value_default_dict_scalar_series(
op, data, key, default, **kwargs
):
return default.map(lambda d, data=data, key=key: safe_get(data, key, d))
@execute_node.register(
ops.MapValueOrDefaultForKey, collections.abc.Mapping, pd.Series, object
)
def execute_map_value_default_dict_series_scalar(
op, data, key, default, **kwargs
):
return key.map(
lambda k, data=data, default=default: safe_get(data, k, default)
)
@execute_node.register(
ops.MapValueOrDefaultForKey, collections.abc.Mapping, pd.Series, pd.Series
)
def execute_map_value_default_dict_series_series(
op, data, key, default, **kwargs
):
return key.map(
lambda k, data=data, defaultiter=iter(default.values): safe_get(
data, k, next(defaultiter)
)
)
def safe_method(mapping, method, *args, **kwargs):
if mapping is None:
return None
try:
method = getattr(mapping, method)
except AttributeError:
return None
else:
return method(*args, **kwargs)
def safe_get(mapping, key, default=None):
return safe_method(mapping, 'get', key, default)
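# Illustrative sketch, not part of the original module: the safe_* helpers
# treat None (or any object lacking the method) as missing data, e.g.
#   safe_get({'a': 1}, 'a')             # -> 1
#   safe_get({'a': 1}, 'b', default=0)  # -> 0
#   safe_get(None, 'a')                 # -> None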
def safe_keys(mapping):
result = safe_method(mapping, 'keys')
if result is None:
return None
return list(result)
def safe_values(mapping):
result = safe_method(mapping, 'values')
if result is None:
return None
return list(result)
@execute_node.register(ops.MapKeys, pd.Series)
def execute_map_keys_series(op, data, **kwargs):
return data.map(safe_keys)
@execute_node.register(ops.MapKeys, (collections.abc.Mapping, type(None)))
def execute_map_keys_dict(op, data, **kwargs):
if data is None:
return None
return list(data.keys())
@execute_node.register(ops.MapValues, pd.Series)
def execute_map_values_series(op, data, **kwargs):
return data.map(safe_values)
@execute_node.register(ops.MapValues, (collections.abc.Mapping, type(None)))
def execute_map_values_dict(op, data, **kwargs):
if data is None:
return None
return list(data.values())
def safe_merge(*maps):
return None if any(m is None for m in maps) else toolz.merge(*maps)
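# Illustrative sketch, not part of the original module:
#   safe_merge({'a': 1}, {'b': 2})  # -> {'a': 1, 'b': 2}
#   safe_merge({'a': 1}, None)      # -> None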
@execute_node.register(
ops.MapConcat,
(collections.abc.Mapping, type(None)),
(collections.abc.Mapping, type(None)),
)
def execute_map_concat_dict_dict(op, lhs, rhs, **kwargs):
return safe_merge(lhs, rhs)
@execute_node.register(
ops.MapConcat, (collections.abc.Mapping, type(None)), pd.Series
)
def execute_map_concat_dict_series(op, lhs, rhs, **kwargs):
if lhs is None:
return pd.Series([None] * len(rhs))
return rhs.map(lambda m, lhs=lhs: safe_merge(lhs, m))
@execute_node.register(
ops.MapConcat, pd.Series, (collections.abc.Mapping, type(None))
)
def execute_map_concat_series_dict(op, lhs, rhs, **kwargs):
if rhs is None:
return pd.Series([None] * len(lhs))
return lhs.map(lambda m, rhs=rhs: safe_merge(m, rhs))
@execute_node.register(ops.MapConcat, pd.Series, pd.Series)
def execute_map_concat_series_series(op, lhs, rhs, **kwargs):
return lhs.map(
lambda m, rhsiter=iter(rhs.values): safe_merge(m, next(rhsiter))
)
| apache-2.0 |
rflamary/POT | docs/source/auto_examples/plot_otda_d2.py | 2 | 5388 | # -*- coding: utf-8 -*-
"""
===================================================
OT for domain adaptation on empirical distributions
===================================================
This example introduces a domain adaptation problem in a 2D setting. It makes
the problem of domain adaptation explicit and introduces some optimal transport
approaches to solve it.
Quantities such as optimal couplings, the main coupling coefficients and
transported samples are shown in order to give a visual understanding
of what the transport methods are doing.
"""
# Authors: Remi Flamary <[email protected]>
# Stanislas Chambon <[email protected]>
#
# License: MIT License
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# generate data
# -------------
n_samples_source = 150
n_samples_target = 150
Xs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source)
Xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target)
# Cost matrix
M = ot.dist(Xs, Xt, metric='sqeuclidean')
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMD Transport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
# Sinkhorn Transport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
# Sinkhorn Transport with Group lasso regularization
ot_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0)
ot_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt)
# transport source samples onto target samples
transp_Xs_emd = ot_emd.transform(Xs=Xs)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs)
transp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs)
##############################################################################
# Fig 1 : plots source and target samples + matrix of pairwise distance
# ---------------------------------------------------------------------
pl.figure(1, figsize=(10, 10))
pl.subplot(2, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')
pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')
pl.subplot(2, 2, 3)
pl.imshow(M, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Matrix of pairwise distances')
pl.tight_layout()
##############################################################################
# Fig 2 : plots optimal couplings for the different methods
# ---------------------------------------------------------
pl.figure(2, figsize=(10, 6))
pl.subplot(2, 3, 1)
pl.imshow(ot_emd.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nEMDTransport')
pl.subplot(2, 3, 2)
pl.imshow(ot_sinkhorn.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSinkhornTransport')
pl.subplot(2, 3, 3)
pl.imshow(ot_lpl1.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSinkhornLpl1Transport')
pl.subplot(2, 3, 4)
ot.plot.plot2D_samples_mat(Xs, Xt, ot_emd.coupling_, c=[.5, .5, 1])
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.title('Main coupling coefficients\nEMDTransport')
pl.subplot(2, 3, 5)
ot.plot.plot2D_samples_mat(Xs, Xt, ot_sinkhorn.coupling_, c=[.5, .5, 1])
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.title('Main coupling coefficients\nSinkhornTransport')
pl.subplot(2, 3, 6)
ot.plot.plot2D_samples_mat(Xs, Xt, ot_lpl1.coupling_, c=[.5, .5, 1])
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.title('Main coupling coefficients\nSinkhornLpl1Transport')
pl.tight_layout()
##############################################################################
# Fig 3 : plot transported samples
# --------------------------------
# display transported samples
pl.figure(4, figsize=(10, 4))
pl.subplot(1, 3, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nEmdTransport')
pl.legend(loc=0)
pl.xticks([])
pl.yticks([])
pl.subplot(1, 3, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSinkhornTransport')
pl.xticks([])
pl.yticks([])
pl.subplot(1, 3, 3)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSinkhornLpl1Transport')
pl.xticks([])
pl.yticks([])
pl.tight_layout()
pl.show()
| mit |
StefReck/Km3-Autoencoder | scripts/evaluation_dataset.py | 1 | 45607 | # -*- coding: utf-8 -*-
"""
Evaluate model performance after training.
This is for comparison of supervised accuracy on different datasets.
Especially for the plots for the broken data comparison.
The usual setup is that simulations are broken (so that the AE has not to be trained again)
so 3 tests are necessary:
Trained on broken --> Test on broken (seeming performance)
Trained on broken --> Test on real (actual perfromance)
Trained on real --> Test on real (best case)
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
from util.evaluation_utilities import make_or_load_files, make_binned_data_plot, make_energy_mae_plot_mean_only, make_energy_mae_plot_mean_only_single
from util.saved_setups_for_plot_statistics import get_path_best_epoch
from energy_evaluation import make_or_load_hist_data
def parse_input():
parser = argparse.ArgumentParser(description='Evaluate model performance after training. This is for comparison of supervised accuracy on different datasets. Especially for the plots for the broken data comparison.')
parser.add_argument('info_tags', nargs="+", type=str, help='Names of identifiers for a saved setup. Use "All" to make all available ones.')
args = parser.parse_args()
params = vars(args)
return params
#Standard, plot acc vs energy plots of these saved setups (taken from parser now)
#which_ones=("4_64_enc",)
#extra string to be included in file names
extra_name=""
#number of bins of the histogram plot; default (is 97) is 32 now; backward compatibility with 98 bins
bins=32
#If not None: Change the y range of all plots to this one (to give them a uniform look)
y_lims_override = None
#Instead of plotting acc vs. energy, one can also make a comparison plot,
#which shows the difference between "on simulations" and "on measured data".
#For this, the number of the broken mode has to be given.
#Can be True, False or "both"
#TODO Rework, currently not functional
make_difference_plot=False
which_broken_study=4
def get_procedure(broken_model, real_model, brokendata_tag, realdata_tag):
#For when the "Simulation"-dataset is manipulated simulations:
modelidents = (broken_model, broken_model, real_model)
dataset_array = (brokendata_tag, realdata_tag, realdata_tag)
return modelidents, dataset_array
def get_info(which_one, extra_name="", y_lims_override=None):
"""
Saved setups of plots.
Returns all relevant information needed to exactly produce (or reproduce) these plots.
"""
#DEFAULT VALUES (overwritten when necessary)
#This will be added before all modelidents
modelpath = "/home/woody/capn/mppi013h/Km3-Autoencoder/models/"
#Default class type the evaluation is done for. None for autoencoders.
class_type = (2, 'up_down')
#mse, acc, mre
plot_type = "acc"
#Default location of legend ("best")
legend_loc="best"
#ylims of plot ( only for acc )
y_lims=(0.5,1.0)
#Where to save the plots
plot_path = "/home/woody/capn/mppi013h/Km3-Autoencoder/results/plots/"
folder_in_the_plots_path = "broken_study/"
#Labels for the plot are defined below now!
#label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
title_of_plot=""
#Overwrite default color palette. Leave empty for auto
color_array=["orange", "blue", "navy"]
#Add the number of bins to the name of the plot file (usually 32)
extra_name="_"+ str(bins)+"_bins" + extra_name
try: which_one=int(which_one)
except ValueError: pass
# ----------------------------- Up down -----------------------------
if which_one=="1_unf" or which_one==0:
#vgg_3_broken1_unf
modelidents = ("vgg_3-broken1/trained_vgg_3-broken1_supervised_up_down_epoch6.h5",
"vgg_3-broken1/trained_vgg_3-broken1_supervised_up_down_epoch6.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5")
#Which dataset each to use
dataset_array = ("xzt_broken", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken1_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.4,1.05)
elif which_one=="1_enc" or which_one==1:
#vgg_3_broken1_enc
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken1_epoch14.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken1_epoch14.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken1_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.4,1.05)
legend_loc="lower right"
elif which_one=="2_unf" or which_one==2:
#vgg_3_broken2_unf
modelidents = ("vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3-noise10/trained_vgg_3-noise10_supervised_up_down_epoch6.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken2", "xzt_broken2")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with noisy data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken2_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.68,0.96)
legend_loc="lower right"
elif which_one=="2_enc" or which_one==3:
#vgg_3_broken2_enc
modelidents = ("vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_epoch9.h5",
"vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_epoch9.h5",
"vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_noise_epoch14.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken2", "xzt_broken2")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with noisy data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken2_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.68,0.96)
legend_loc="lower right"
elif which_one=="4_unf" or which_one==4:
modelidents = ("vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5",
"vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.5,1.0)
elif which_one=="4_enc" or which_one==5:
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken4_epoch52.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken4_epoch52.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.5,1.0)
elif which_one=="4_pic_enc" or which_one==6:
modelidents = ("vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_broken4_epoch53.h5",
"vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_broken4_epoch53.h5",
"vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_epoch74.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='600 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_picture_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_200_enc" or which_one==7:
modelidents = ("vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_broken4_epoch59.h5",
"vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_broken4_epoch59.h5",
"vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_epoch45.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_64_enc" or which_one==8:
modelidents = ("vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_broken4_epoch57.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_broken4_epoch57.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_epoch26.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='64 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_64_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_64_enc_nodrop" or which_one==26:
modelidents = ("vgg_5_64/trained_vgg_5_64_autoencoder_epoch82_supervised_up_down_broken4_nodrop_epoch52.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch82_supervised_up_down_broken4_nodrop_epoch52.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_nodrop_epoch69.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='64 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_64_broken4_enc_nodrop"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_32_enc" or which_one==9:
modelidents = ("vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_broken4_epoch1.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_broken4_epoch1.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_epoch48.h5")
dataset_array = ("xzt_broken4", "xzt", "xzt")
title_of_plot='32 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_32_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
legend_loc="lower right"
elif which_one=="4_32_enc_nodrop" or which_one==23:
modelidents = ("vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch22_supervised_up_down_broken4_nodrop_epoch47.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch22_supervised_up_down_broken4_nodrop_epoch47.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_nodrop_epoch79.h5")
dataset_array = ("xzt_broken4", "xzt", "xzt")
title_of_plot='32 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_32_broken4_enc_nodrop"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
legend_loc="lower right"
elif which_one=="4flip_unf" or which_one==10:
modelidents = ("vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken4", "xzt_broken4")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_flip_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.75,1.0)
elif which_one=="4flip_enc" or which_one==11:
modelidents = ("vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch12_supervised_up_down_xzt_epoch62.h5",
"vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch12_supervised_up_down_xzt_epoch62.h5",
"vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch10_supervised_up_down_broken4_epoch59.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken4", "xzt_broken4")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_flip_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.75,1)
elif which_one=="5_enc" or which_one==12:
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken5_epoch58.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken5_epoch58.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken5", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken5_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
elif which_one=="5_unf" or which_one==13:
broken_model = "vgg_3-broken5/trained_vgg_3-broken5_supervised_up_down_epoch6.h5"
real_model = "vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5"
brokendata_tag = "xzt_broken5"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
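#get_procedure expands this into the three (model, dataset) pairs used
#throughout: broken model on broken data ('simulations'), broken model on
#real data ('measured'), and real model on real data (the limit curve).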
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken5_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
elif which_one=="4_200_large_enc" or which_one==14:
broken_model = "vgg_5_200_large/trained_vgg_5_200_large_autoencoder_epoch39_supervised_up_down_broken4_epoch34.h5"
real_model = get_path_best_epoch("vgg_5_200_large", full_path=False)
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
title_of_plot='Large 200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_large_broken4_enc"+extra_name+".pdf"
y_lims=(0.7,0.95)
elif which_one=="4_200_small_enc" or which_one==15:
broken_model = "vgg_5_200_small/trained_vgg_5_200_small_autoencoder_epoch77_supervised_up_down_broken4_epoch57.h5"
real_model = get_path_best_epoch("vgg_5_200_small", full_path=False)
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
title_of_plot='Small 200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_small_broken4_enc"+extra_name+".pdf"
y_lims=(0.7,0.95)
# ----------------------------- Energy regression -----------------------------
elif which_one=="energy_12_enc" or which_one==16:
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken12_enc"+extra_name+".pdf"
plot_type = "mre"
#y_lims=(0.7,0.95)
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken12_epoch48.h5"
real_model = get_path_best_epoch("vgg_3_2000_E", full_path=False)
brokendata_tag = "xzt_broken12"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
elif which_one=="energy_12_unf" or which_one==17:
brokendata_tag = "xzt_broken12"
realdata_tag = "xzt"
broken_model = "vgg_3-broken12/trained_vgg_3-broken12_supervised_energy_epoch11.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken12_unf"+extra_name+".pdf"
plot_type = "mre"
#y_lims=(0.7,0.95)
elif which_one=="energy_4_2000_unf" or which_one==19:
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
broken_model = "vgg_3-broken4/trained_vgg_3-broken4_supervised_energy_epoch10.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken4_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.2,0.6)
elif which_one=="energy_4_2000_enc" or which_one==20:
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken4_nodrop_epoch5.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken4_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.2,0.6)
elif which_one=="energy_13_2000_unf" or which_one==21:
brokendata_tag = "xzt_broken13"
realdata_tag = "xzt"
broken_model = "vgg_3-broken13/trained_vgg_3-broken13_supervised_energy_epoch19.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken13_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.02,0.78)
elif which_one=="energy_13_2000_enc" or which_one==22:
brokendata_tag = "xzt_broken13"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken13_nodrop_epoch9.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken13_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.02,0.78)
#Broken 14 (noise proportional to E, up to an extra 2 kHz)
#Bottleneck scan
elif which_one=="energy_14_2000_unf" or which_one==24:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_3-broken14/trained_vgg_3-broken14_supervised_energy_epoch15.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken14_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_2000_enc" or which_one==25:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken14_nodrop_epoch7.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_600_pic_enc" or which_one==27:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch44_supervised_energy_broken14_nodrop_epoch12.h5"
real_model = get_path_best_epoch("vgg_5_600_picture_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_picture_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_dense_enc" or which_one==28:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_dense-new/trained_vgg_5_200_dense-new_autoencoder_epoch101_supervised_energy_broken14_nodrop_epoch45.h5"
real_model = get_path_best_epoch("vgg_5_200_dense_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_dense_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_64_enc" or which_one==29:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_64/trained_vgg_5_64_autoencoder_epoch78_supervised_energy_broken14_nodrop_epoch49.h5"
real_model = get_path_best_epoch("vgg_5_64_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_64_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_32_enc" or which_one==30:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch44_supervised_energy_broken14_nodrop_epoch59.h5"
real_model = get_path_best_epoch("vgg_5_32_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_32_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_enc" or which_one==31:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_energy_broken14_nodrop_epoch11.h5"
real_model = get_path_best_epoch("vgg_5_200_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_large_enc" or which_one==36:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_large/trained_vgg_5_200_large_autoencoder_epoch45_supervised_energy_broken14_drop035_epoch14.h5"
real_model = get_path_best_epoch("vgg_5_200_large_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_large_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_small_enc" or which_one==37:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_small/trained_vgg_5_200_small_autoencoder_epoch89_supervised_energy_broken14_nodrop_epoch11.h5"
real_model = get_path_best_epoch("vgg_5_200_small_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
# ----------------------------- Other tests -----------------------------
elif which_one=="energy_2_2000_unf" or which_one==32:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken2"
broken_model = get_path_best_epoch("2000_unf_E", full_path=False)
real_model = "vgg_3-noise10/trained_vgg_3-noise10_supervised_energy_epoch12.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken2_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.21,0.81)
elif which_one=="energy_2_2000_enc" or which_one==33:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken2"
broken_model = "vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch5_supervised_energy_nodrop_epoch3.h5"
real_model = "vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch7_supervised_energy_nodrop_epoch5.h5" #_broken2
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken2_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.21,0.81)
elif which_one=="energy_15_2000_unf" or which_one==34:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken15"
broken_model = get_path_best_epoch("2000_unf_E", full_path=False)
real_model = "vgg_5_2000-broken15/trained_vgg_5_2000-broken15_supervised_energy_epoch12.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken15_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.18,0.55)
elif which_one=="energy_15_2000_enc" or which_one==35:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken15"
broken_model = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energynodrop_epoch67.h5"
real_model = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energy_broken15_nodrop_epoch22.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken15_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.18,0.55)
# ----------------------------- Unfreeze stuff -----------------------------
elif which_one=="unfreeze_comp" or which_one==18:
broken_model = "vgg_5_200-unfreeze/trained_vgg_5_200-unfreeze_autoencoder_epoch1_supervised_up_down_contE20_broken4_epoch30.h5"
real_model = "vgg_5_200-unfreeze/trained_vgg_5_200-unfreeze_autoencoder_epoch1_supervised_up_down_contE20_epoch30.h5"
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Continuation of partially unfrozen network training'
#in the results/plots folder:
folder_in_the_plots_path="unfreeze/"
plot_file_name = "broken4_vgg5_200_contE20"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
else:
raise NameError(str(which_one) + " is not known!")
title_of_plot=""
if plot_type=="mre":
#energy plot
label_array=["On 'simulations'", "On 'measured' data", "Lower limit on 'measured' data"]
else:
label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
if y_lims_override is not None:
y_lims = y_lims_override
modelidents = [modelpath + modelident for modelident in modelidents]
save_plot_as = plot_path + folder_in_the_plots_path + plot_file_name
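#Returned tuple: full model paths, dataset tags, plot title, output file path,
#y limits, class type, plot type, legend location, labels and colors.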
return modelidents, dataset_array, title_of_plot, save_plot_as, y_lims, class_type, plot_type, legend_loc, label_array, color_array
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
def make_evaluation(info_tag, extra_name, y_lims_override, show_the_plot=True):
"""
Main function:
Make an evaluation based on the info_tag (generate and save, or load, the evaluation data, then save the plot).
The plot shows acc or loss over the MC energy as a histogram, evaluated on different
datasets.
Often, there will be three models plotted:
0: On 'simulations'
1: On 'measured' data
2: Upper lim
"""
modelidents, dataset_array, title_of_plot, save_plot_as, y_lims, class_type, plot_type, legend_loc, label_array, color_array = get_info(info_tag, extra_name=extra_name, y_lims_override=y_lims_override)
#make plot of multiple data:
if plot_type == "acc":
#For up-down networks:
#generate or load data automatically:
#this will be a list of binned evaluations, one for every model
hist_data_array = make_or_load_files(modelidents, dataset_array, class_type=class_type, bins=bins)
print_statistics_in_numbers(hist_data_array, plot_type)
y_label_of_plot="Accuracy"
fig = make_binned_data_plot(hist_data_array, label_array, title_of_plot, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array, legend_loc=legend_loc)
elif plot_type == "mre":
#Median relative error for energy regression, separated into track and shower events
#Data is loaded by the energy evaluation function, which is not
#fully compatible with this one :-( so additional info is copied from there manually
hist_data_array=[]
hist_data_single=[]
for model_no,model_path in enumerate(modelidents):
dataset_tag = dataset_array[model_no]
print("Working on", model_path.split("trained_")[1][:-3], "using dataset", dataset_tag)
zero_center=True
energy_bins_2d=np.arange(3,101,1)
energy_bins_1d=20
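#energy_bins_2d gives 1 GeV wide bins from 3 to 100 GeV for the 2D histogram;
#the 1D MAE plot uses 20 bins.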
hist_data_2d, energy_mae_plot_data = make_or_load_hist_data(model_path,
dataset_tag, zero_center, energy_bins_2d, energy_bins_1d, samples=None,
include_mae_single=True)
#only interested in the mae plot data
hist_data_array.append(energy_mae_plot_data[:2])
hist_data_single.append(energy_mae_plot_data[2])
print_statistics_in_numbers(hist_data_array, plot_type, hist_data_single=hist_data_single)
y_label_of_plot='Median fractional energy resolution'
#Make the single plot and save without displaying
fig_single = make_energy_mae_plot_mean_only_single(hist_data_single, label_list=label_array, color_list=color_array, y_lims=y_lims)
fig_single_save_as=save_plot_as[:-4]+"_single.pdf"
fig_single.savefig(fig_single_save_as)
print("Single plot saved to", fig_single_save_as)
plt.close(fig_single)
fig = make_energy_mae_plot_mean_only(hist_data_array, label_list=label_array, color_list=color_array, y_lims=y_lims)
elif plot_type == "mse":
#Intended for Autoencoders, not been used in a long time...
y_label_of_plot="Loss"
fig = make_binned_data_plot(hist_data_array, label_array, title_of_plot, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array, legend_loc=legend_loc)
else:
print("Plot type", plot_type, "not supported. Not generating plots, but hist_data is still saved.")
fig.savefig(save_plot_as)
print("Plot saved to", save_plot_as)
if show_the_plot:
plt.show(fig)
else:
plt.close(fig)
return
def print_statistics_in_numbers(hist_data_array, plot_type, return_line=False, hist_data_single=None):
"""
Prints the average loss of performance,
averaged over all bins (not over all events).
For this, three hist_data entries are necessary:
hist_data_array
[0]: On simulations (broken on broken)
[1]: On measured (broken on real)
[2]: Upper limit (real on real)
"""
print("\n----------Statistics of this evaluation-----------------")
print("\tAveraged over energy bins, not events!")
if plot_type == "acc":
#hist_data contains [energy, binned_acc] for every model
on_simulations_data = hist_data_array[0][1]
on_measured_data = hist_data_array[1][1]
upper_limit_data = hist_data_array[2][1]
dropoff_sim_measured = ( (on_simulations_data - on_measured_data)/on_measured_data ).mean()
dropoff_upper_limit_measured = ((upper_limit_data - on_measured_data)/on_measured_data ).mean()
print("Acc on Sims:\tOn measured\tUpper lim")
print(np.mean(on_simulations_data),"\t", np.mean(on_measured_data),"\t", np.mean(upper_limit_data))
print("\nAverage relative %-acc reduction across all bins: 100 * (x - measured) / measured")
print("From simulation to measured\tFrom upper lim to measured:")
print(dropoff_sim_measured*100,"\t",dropoff_upper_limit_measured*100)
print("--------------------------------------------------------\n")
header = ("(Sim-Meas)/Meas","(Upperlim-Meas)/Meas")
line=(dropoff_sim_measured*100, dropoff_upper_limit_measured*100)
elif plot_type=="mre":
#hist_data_array is for every model the tuple:
#[energy_mae_plot_data_track, energy_mae_plot_data_shower]
#each containing [energy, binned mre]
#hist_data_single contains for every model the unseparated data tuple: [energy, binned mre]
on_simulations_data_track = np.array(hist_data_array[0][0][1])
on_measured_data_track = np.array(hist_data_array[1][0][1])
upper_limit_data_track = np.array(hist_data_array[2][0][1])
on_simulations_data_shower = np.array(hist_data_array[0][1][1])
on_measured_data_shower = np.array(hist_data_array[1][1][1])
upper_limit_data_shower = np.array(hist_data_array[2][1][1])
on_simulations_data_single = np.array(hist_data_single[0][1])
on_measured_data_single = np.array(hist_data_single[1][1])
upper_limit_data_single = np.array(hist_data_single[2][1])
print("First three are MRE, last two are average relative % increase across all bins: -1 * 100 * (x - measured) / measured")
def print_one_table(on_simulations_data, on_measured_data, upper_limit_data, printing_header="Track like events:"):
dropoff_sim_measured = (-1*(on_simulations_data - on_measured_data)/on_measured_data).mean()
dropoff_upper_limit = (-1*(upper_limit_data - on_measured_data)/on_measured_data).mean()
print(printing_header)
print("On Sims:\tOn measured\tUpper lim\tFrom simulation to measured\tFrom upper lim to measured:")
print(np.mean(on_simulations_data),"\t", np.mean(on_measured_data),"\t", np.mean(upper_limit_data),"\t", dropoff_sim_measured*100,"\t",dropoff_upper_limit*100)
print("--------------------------------------------------------\n")
print_one_table(on_simulations_data_track, on_measured_data_track, upper_limit_data_track, "Track like events:")
print_one_table(on_simulations_data_shower, on_measured_data_shower, upper_limit_data_shower, "Shower like events:")
print_one_table(on_simulations_data_single, on_measured_data_single, upper_limit_data_single, "All events:")
header = None
line=None
else:
raise NameError("Unknown plottype"+plot_type)
if return_line:
return header, line
if __name__ == "__main__":
params = parse_input()
which_ones = params["info_tags"]
if "all" in which_ones:
show_the_plot = False
current_tag=0
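#Loop over all integer info tags; get_info raises a NameError for an
#unknown tag, which ends the loop.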
while True:
try:
make_evaluation(current_tag, extra_name, y_lims_override, show_the_plot)
current_tag+=1
except NameError:
print("Done. Made a total of", current_tag, "plots.")
break
else:
show_the_plot = True
for info_tag in which_ones:
make_evaluation(info_tag, extra_name, y_lims_override, show_the_plot)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#not supported anymore...
if make_difference_plot == True or make_difference_plot == "both":
raise NotImplementedError("The difference plot is not supported anymore.")
#which plots to make diff of; (first - second) / first
make_diff_of_list=((0,1),(2,1))
title_list=("Relative loss of accuracy: 'simulations' to 'measured' data",
"Realtive difference in accuracy: Upper limit to 'measured' data")
if which_broken_study==2:
which_ones = ("2_unf", "2_enc")
save_as_list=(plot_path + "vgg_3_broken2_sim_real"+extra_name+".pdf",
plot_path + "vgg_3_broken2_upper_real"+extra_name+".pdf")
y_lims_list=((-0.02,0.1),(-0.02,0.1))
elif which_broken_study==4:
which_ones = ("4_unf", "4_enc")
save_as_list=(plot_path + "vgg_3_broken4_sim_real"+extra_name+".pdf",
plot_path + "vgg_3_broken4_upper_real"+extra_name+".pdf")
y_lims_list=((-0.02,0.1),(-0.02,0.1))
else:
raise ValueError("Unknown which_broken_study: " + str(which_broken_study))
for i in range(len(make_diff_of_list)):
#label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
modelidents,dataset_array,title_of_plot,plot_file_name,y_lims = get_info(which_ones[0], y_lims_override=y_lims_override)
modelnames=[] # a tuple of eg "vgg_1_xzt_supervised_up_down_epoch6"
# (created from "trained_vgg_1_xzt_supervised_up_down_epoch6.h5" )
for modelident in modelidents:
modelnames.append(modelident.split("trained_")[1][:-3])
hist_data_array_unf = make_or_load_files(modelnames, dataset_array, modelidents=modelidents, class_type=class_type, bins=bins)
modelidents,dataset_array,title_of_plot,plot_file_name,y_lims = get_info(which_ones[1], y_lims_override=y_lims_override)
modelnames=[] # a tuple of eg "vgg_1_xzt_supervised_up_down_epoch6"
# (created from "trained_vgg_1_xzt_supervised_up_down_epoch6.h5" )
for modelident in modelidents:
modelnames.append(modelident.split("trained_")[1][:-3])
hist_data_array_enc = make_or_load_files(modelnames, dataset_array, modelidents=modelidents, class_type=class_type, bins=bins)
label_array=["Unfrozen", "Autoencoder-encoder"]
#Overwrite default color palette. Leave empty for auto
color_array=[]
#loss, acc, None
plot_type = "acc"
#Info about model
class_type = (2, 'up_down')
modelpath = "/home/woody/capn/mppi013h/Km3-Autoencoder/models/"
plot_path = "/home/woody/capn/mppi013h/Km3-Autoencoder/results/plots/"
title_of_plot=title_list[i]
save_plot_as = save_as_list[i]
y_lims=y_lims_list[i]
make_diff_of=make_diff_of_list[i]
hist_data_array_diff=[]
hist_1=np.array(hist_data_array_unf[make_diff_of[0]])
hist_2=np.array(hist_data_array_unf[make_diff_of[1]])
diff_hist=[hist_1[0], (hist_1[1]-hist_2[1])/hist_1[1]]
hist_data_array_diff.append(diff_hist)
hist_1=np.array(hist_data_array_enc[make_diff_of[0]])
hist_2=np.array(hist_data_array_enc[make_diff_of[1]])
diff_hist=[hist_1[0], (hist_1[1]-hist_2[1])/hist_1[1]]
hist_data_array_diff.append(diff_hist)
#make plot of multiple data:
if plot_type == "acc":
y_label_of_plot="Difference in accuracy"
make_energy_to_accuracy_plot_comp_data(hist_data_array_diff, label_array, title_of_plot, filepath=save_plot_as, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array)
elif plot_type == "loss":
y_label_of_plot="Loss"
make_energy_to_loss_plot_comp_data(hist_data_array_diff, label_array, title_of_plot, filepath=save_plot_as, y_label=y_label_of_plot, color_array=color_array)
elif plot_type == None:
print("plot_type==None: Not generating plots")
else:
print("Plot type", plot_type, "not supported. Not generating plots, but hist_data is still saved.")
print("Plot saved to", save_plot_as)
| mit |
davidgbe/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
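# SubEstimator below exposes the usual estimator API; setting hidden_method
# makes exactly one of its methods raise AttributeError, as if it did not exist.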
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/io/parser/test_read_fwf.py | 7 | 15261 | # -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
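# Each colspec is a half-open [start, stop) interval of character positions.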
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
| mit |
joetidwell/daftHM | examples/classic.py | 7 | 1057 | """
The Quintessential PGM
======================
This is a demonstration of a very common structure found in graphical models.
It has been rendered using Daft's default settings for all the parameters
and it shows off how much beauty is baked in by default.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
# Instantiate the PGM.
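# (The list gives the figure shape in model units; `origin` shifts the
# lower-left corner of the coordinate frame.)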
pgm = daft.PGM([2.3, 2.05], origin=[0.3, 0.3])
# Hierarchical parameters.
pgm.add_node(daft.Node("alpha", r"$\alpha$", 0.5, 2, fixed=True))
pgm.add_node(daft.Node("beta", r"$\beta$", 1.5, 2))
# Latent variable.
pgm.add_node(daft.Node("w", r"$w_n$", 1, 1))
# Data.
pgm.add_node(daft.Node("x", r"$x_n$", 2, 1, observed=True))
# Add in the edges.
pgm.add_edge("alpha", "beta")
pgm.add_edge("beta", "w")
pgm.add_edge("w", "x")
pgm.add_edge("beta", "x")
# And a plate.
pgm.add_plate(daft.Plate([0.5, 0.5, 2, 1], label=r"$n = 1, \cdots, N$",
shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("classic.pdf")
pgm.figure.savefig("classic.png", dpi=150)
| mit |
vortex-ape/scikit-learn | sklearn/feature_selection/rfe.py | 2 | 20029 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..utils.metaestimators import _safe_split
from ..utils.validation import check_is_fitted
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..utils import Parallel, delayed, effective_n_jobs
from ..model_selection import check_cv
from ..model_selection._validation import _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and the importance of each feature is obtained either through a
``coef_`` attribute or through a ``feature_importances_`` attribute.
Then, the least important features are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then ``step`` corresponds to the
(integer) number of features to remove at each iteration.
If within (0.0, 1.0), then ``step`` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, (default=0)
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 truly informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True, False, False, False, False,
False])
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
See also
--------
RFECV : Recursive feature elimination with built-in cross-validated
selection of the best number of features
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
# Parameter step_score controls the calculation of self.scores_
# step_score is not exposed to users
# and is used when implementing RFECV
# self.scores_ will not be calculated when calling _fit through fit
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
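# e.g. with 100 features and step=0.1, int(max(1, 0.1 * 100)) = 10 features
# are removed per iteration; an integer step >= 1 is used as-is.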
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
else:
coefs = getattr(estimator, 'feature_importances_', None)
if coefs is None:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
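# Never remove more features in one step than are left above n_features_to_select.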
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
check_is_fitted(self, 'support_')
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
step : int or float, optional (default=1)
If greater than or equal to 1, then ``step`` corresponds to the
(integer) number of features to remove at each iteration.
If within (0.0, 1.0), then ``step`` corresponds to the percentage
(rounded down) of features to remove at each iteration.
Note that the last iteration may remove fewer than ``step`` features in
order to reach ``min_features_to_select``.
min_features_to_select : int, (default=1)
The minimum number of features to be selected. This number of features
will always be scored, even if the difference between the original
feature count and ``min_features_to_select`` isn't divisible by
``step``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.20
``cv`` default value of None will change from 3-fold to 5-fold
in v0.22.
scoring : string, callable or None, optional, (default=None)
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, (default=0)
Controls verbosity of output.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while fitting across folds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to
``ceil((n_features - min_features_to_select) / step) + 1``,
where step is the number of features removed at each iteration.
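    For example, with ``n_features=10``, ``min_features_to_select=1`` and
    ``step=1``, ``grid_scores_`` has ``ceil((10 - 1) / 1) + 1 = 10`` entries,
    one per candidate feature-subset size.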
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True, False, False, False, False,
False])
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
See also
--------
RFE : Recursive feature elimination
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, min_features_to_select=1, cv='warn',
scoring=None, verbose=0, n_jobs=None):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
self.min_features_to_select = min_features_to_select
def fit(self, X, y, groups=None):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
groups : array-like, shape = [n_samples], optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
# Build an RFE object, which will evaluate and score each possible
# feature count, down to self.min_features_to_select
rfe = RFE(estimator=self.estimator,
n_features_to_select=self.min_features_to_select,
step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
        # This branching is done to make sure that user code that sets
        # n_jobs to 1 and provides bound methods as scorers is not broken
        # with the addition of the n_jobs parameter in version 0.18.
if effective_n_jobs(self.n_jobs) == 1:
parallel, func = list, _rfe_single_fit
else:
parallel = Parallel(n_jobs=self.n_jobs)
func = delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y, groups))
scores = np.sum(scores, axis=0)
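        # Reverse the scores so that, when several feature counts tie on CV
        # score, np.argmax picks the candidate with the fewest features.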
scores_rev = scores[::-1]
argmax_idx = len(scores) - np.argmax(scores_rev) - 1
n_features_to_select = max(
n_features - (argmax_idx * step),
self.min_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step,
verbose=self.verbose)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y, groups)
return self
| bsd-3-clause |
allenai/document-qa | docqa/eval/squad_full_document_eval.py | 1 | 8928 | import argparse
from typing import List, Optional
import numpy as np
import pandas as pd
from tqdm import tqdm
from docqa import trainer
from docqa.data_processing.qa_training_data import ContextAndQuestion, Answer, ParagraphAndQuestionDataset
from docqa.data_processing.span_data import TokenSpans
from docqa.data_processing.text_utils import NltkPlusStopWords, ParagraphWithInverse
from docqa.dataset import FixedOrderBatcher
from docqa.eval.ranked_scores import compute_ranked_scores
from docqa.evaluator import Evaluation, Evaluator
from docqa.model_dir import ModelDir
from docqa.squad.document_rd_corpus import get_doc_rd_doc
from docqa.squad.squad_data import SquadCorpus
from docqa.squad.squad_document_qa import SquadTfIdfRanker
from docqa.squad.squad_official_evaluation import exact_match_score as squad_em_score
from docqa.squad.squad_official_evaluation import f1_score as squad_f1_score
from docqa.utils import ResourceLoader, flatten_iterable, print_table
"""
Run an evaluation on "document-level" squad
"""
class RankedParagraphQuestion(ContextAndQuestion):
def __init__(self, question: List[str], answer: Optional[Answer],
question_id: str, paragraph: ParagraphWithInverse,
rank: int, paragraph_number: int):
super().__init__(question, answer, question_id)
self.paragraph = paragraph
self.rank = rank
self.paragraph_number = paragraph_number
def get_original_text(self, para_start, para_end):
return self.paragraph.get_original_text(para_start, para_end)
def get_context(self):
return flatten_iterable(self.paragraph.text)
@property
def n_context_words(self) -> int:
return sum(len(s) for s in self.paragraph.text)
class RecordParagraphSpanPrediction(Evaluator):
def __init__(self, bound: int, record_text_ans: bool):
self.bound = bound
self.record_text_ans = record_text_ans
def tensors_needed(self, prediction):
span, score = prediction.get_best_span(self.bound)
needed = dict(spans=span, model_scores=score)
return needed
def evaluate(self, data: List[RankedParagraphQuestion], true_len, **kargs):
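        # For every (question, paragraph) pair, turn the predicted span back
        # into text and score it against the reference answers with the
        # official SQuAD exact-match / F1 metrics.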
spans, model_scores = np.array(kargs["spans"]), np.array(kargs["model_scores"])
pred_f1s = np.zeros(len(data))
pred_em = np.zeros(len(data))
text_answers = []
for i in tqdm(range(len(data)), total=len(data), ncols=80, desc="scoring"):
point = data[i]
if point.answer is None and not self.record_text_ans:
continue
pred_span = spans[i]
pred_text = point.paragraph.get_original_text(pred_span[0], pred_span[1])
if self.record_text_ans:
text_answers.append(pred_text)
if point.answer is None:
continue
f1 = 0
em = False
for answer in data[i].answer.answer_text:
f1 = max(f1, squad_f1_score(pred_text, answer))
if not em:
em = squad_em_score(pred_text, answer)
pred_f1s[i] = f1
pred_em[i] = em
results = {}
results["n_answers"] = [0 if x.answer is None else len(x.answer.answer_spans) for x in data]
if self.record_text_ans:
results["text_answer"] = text_answers
results["predicted_score"] = model_scores
results["predicted_start"] = spans[:, 0]
results["predicted_end"] = spans[:, 1]
results["text_f1"] = pred_f1s
results["rank"] = [x.rank for x in data]
results["text_em"] = pred_em
results["question_id"] = [x.question_id for x in data]
return Evaluation({}, results)
def main():
parser = argparse.ArgumentParser(description='Evaluate a model on document-level SQuAD')
parser.add_argument('model', help='model to use')
parser.add_argument('output', type=str,
help="Store the per-paragraph results in csv format in this file")
parser.add_argument('-n', '--n_sample', type=int, default=None,
help="(for testing) sample documents")
    # Note: 'async' became a reserved word in Python 3.7, so the value is
    # stored under a different attribute name while keeping the original flag.
    parser.add_argument('-s', '--async', type=int, default=10, dest='async_batches',
                        help="Encoding batch asynchronously, queueing up to this many")
parser.add_argument('-a', '--answer_bound', type=int, default=17,
help="Max answer span length")
parser.add_argument('-p', '--n_paragraphs', type=int, default=None,
help="Max number of paragraphs to use")
parser.add_argument('-b', '--batch_size', type=int, default=200,
help="Batch size, larger sizes can be faster but uses more memory")
parser.add_argument('-c', '--corpus', choices=["dev", "train", "doc-rd-dev"], default="dev")
parser.add_argument('--no_ema', action="store_true",
help="Don't use EMA weights even if they exist")
args = parser.parse_args()
model_dir = ModelDir(args.model)
print("Loading data")
questions = []
ranker = SquadTfIdfRanker(NltkPlusStopWords(True),
args.n_paragraphs, force_answer=False)
if args.corpus == "doc-rd-dev":
docs = SquadCorpus().get_dev()
if args.n_sample is not None:
docs.sort(key=lambda x:x.doc_id)
np.random.RandomState(0).shuffle(docs)
docs = docs[:args.n_sample]
print("Fetching document reader docs...")
doc_rd_versions = get_doc_rd_doc(docs)
print("Ranking and matching with questions...")
for doc in tqdm(docs):
doc_questions = flatten_iterable(x.questions for x in doc.paragraphs)
paragraphs = doc_rd_versions[doc.title]
ranks = ranker.rank([x.words for x in doc_questions], [x.text for x in paragraphs])
for i, question in enumerate(doc_questions):
para_ranks = np.argsort(ranks[i])
for para_rank, para_num in enumerate(para_ranks[:args.n_paragraphs]):
# Just use dummy answers spans for these pairs
questions.append(RankedParagraphQuestion(question.words,
TokenSpans(question.answer.answer_text, np.zeros((0, 2), dtype=np.int32)),
question.question_id, paragraphs[para_num], para_rank, para_num))
rl = ResourceLoader()
else:
if args.corpus == "dev":
docs = SquadCorpus().get_dev()
else:
docs = SquadCorpus().get_train()
rl = SquadCorpus().get_resource_loader()
if args.n_sample is not None:
docs.sort(key=lambda x:x.doc_id)
np.random.RandomState(0).shuffle(docs)
docs = docs[:args.n_sample]
for q in ranker.ranked_questions(docs):
for i, p in enumerate(q.paragraphs):
questions.append(RankedParagraphQuestion(q.question,
TokenSpans(q.answer_text, p.answer_spans),
q.question_id,
ParagraphWithInverse([p.text], p.original_text, p.spans),
i, p.paragraph_num))
print("Split %d docs into %d paragraphs" % (len(docs), len(questions)))
questions = sorted(questions, key=lambda x: (x.n_context_words, len(x.question)), reverse=True)
for q in questions:
if len(q.answer.answer_spans.shape) != 2:
raise ValueError()
checkpoint = model_dir.get_best_weights()
if checkpoint is not None:
print("Using best weights")
else:
print("Using latest checkpoint")
checkpoint = model_dir.get_latest_checkpoint()
if checkpoint is None:
raise ValueError("No checkpoints found")
data = ParagraphAndQuestionDataset(questions, FixedOrderBatcher(args.batch_size, True))
model = model_dir.get_model()
evaluation = trainer.test(model, [RecordParagraphSpanPrediction(args.answer_bound, True)],
{args.corpus: data}, rl, checkpoint,
                              not args.no_ema, args.async_batches)[args.corpus]
print("Saving result")
output_file = args.output
df = pd.DataFrame(evaluation.per_sample)
df.sort_values(["question_id", "rank"], inplace=True, ascending=True)
group_by = ["question_id"]
f1 = compute_ranked_scores(df, "predicted_score", "text_f1", group_by)
em = compute_ranked_scores(df, "predicted_score", "text_em", group_by)
table = [["N Paragraphs", "EM", "F1"]]
table += list([str(i+1), "%.4f" % e, "%.4f" % f] for i, (e, f) in enumerate(zip(em, f1)))
print_table(table)
df.to_csv(output_file, index=False)
if __name__ == "__main__":
main()
| apache-2.0 |
massmutual/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
alurban/mentoring | tidal_disruption/scripts/kepler_potentials.py | 1 | 1465 | # Imports.
import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
import matplotlib.patheffects as PE
from matplotlib import ticker
# Physical constants.
G = 6.67408e-11 # Newton's constant in m^3 / kg / s^2
MSun = 1.989e30 # Solar mass in kg
M = 1.4 * MSun # Mass of each neutron star in this example
# Set array of orbital separation values.
a = np.linspace(1e-4, 100, 500) * 1000
# Construct a set of arrays corresponding to effective potentials
# with different angular momenta.
aL = np.array([0, 10, 20, 40, 60])
L = np.sqrt( 0.5 * G * M**3 * aL * 1000 )
Phi = np.array([- G * M**2 / a + x**2 / (M * a**2) for x in L])
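# Phi(a) = -G*M^2/a + L^2/(M*a^2) is the effective potential of the equal-mass
# two-body problem (reduced mass M/2); each L above is the angular momentum of
# a circular orbit at separation aL km, i.e. L^2 = G*M^3*aL/2.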
colors = ['k--', 'k', 'CornflowerBlue', 'Red', 'DarkSlateGrey']
# Construct a figure.
fig = plt.figure( figsize=(6, 3.25) )
# Plot the effective potential as a function of orbital separation.
ax = fig.add_subplot(1, 1, 1)
ax.plot([0, 100], [0, 0], 'k--', linewidth=0.5)
for x, y, c in zip(aL, Phi, colors):
ax.plot(a/1000, y/1e45, c, linewidth=2., label='$a_L =$ %s km' % x)
ax.set_xlim([0, 100])
ax.set_xlabel('orbital separation (km)')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
ax.set_ylim([-30, 10])
ax.set_ylabel('energy (10$^{45}$ J)')
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
leg = ax.legend(loc=4, fontsize=11, fancybox=True)
leg.legendPatch.set_path_effects([PE.withSimplePatchShadow()])
# Save the figure.
fig.tight_layout()
plt.savefig('kepler_potentials.pdf')
| gpl-3.0 |
fabioticconi/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 38 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
    iterations affects time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
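# Minimal usage sketch of the function being benchmarked (the parameter values
# here are illustrative, not recommendations):
#   from sklearn.utils.extmath import randomized_svd
#   U, s, Vt = randomized_svd(X, n_components=50, n_oversamples=10, n_iter=2,
#                             power_iteration_normalizer='auto', random_state=0)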
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
if handle_missing_dataset(CIFAR_FOLDER) == "skip":
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/manifold/tests/test_locally_linear.py | 1 | 5242 | from itertools import product
import numpy as np
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| mit |
wazeerzulfikar/scikit-learn | examples/decomposition/plot_pca_3d.py | 10 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# #############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
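# pdf() below is an equal-weight mixture of two zero-mean Gaussians (one
# narrow, one wide); it is only used to colour the point cloud by density.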
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
# #############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
aywander/pluto-outflows | Tools/pyPLUTO/examples/Sph_disk.py | 3 | 1488 | import os
import sys
from numpy import *
from matplotlib.pyplot import *
import pyPLUTO as pp
plutodir = os.environ['PLUTO_DIR']
wdir = plutodir+'/Test_Problems/MHD/FARGO/Spherical_Disk/'
nlinf = pp.nlast_info(w_dir=wdir,datatype='vtk')
D = pp.pload(nlinf['nlast'],w_dir=wdir,datatype='vtk') # Loading the data into a pload object D.
I = pp.Image()
f1 = figure(figsize=[15,6],num=1)
ax1=f1.add_subplot(122)
I.pltSphData(D,w_dir=wdir,datatype='vtk',plvar='bx1',logvar=False,rphi=False,x3cut=96)
colorbar(orientation='horizontal')
ax1.set_xlabel(r'Radius')
ax1.set_ylabel(r'Height')
ax1.set_title(r'Magnetic field $B_{\rm x}$')
ax2=f1.add_subplot(121)
I.pltSphData(D,w_dir=wdir,datatype='vtk',plvar='rho',logvar=True,rphi=True,x2cut=24)
colorbar(orientation='vertical')
ax2.set_xlabel(r'x')
ax2.set_ylabel(r'y')
ax2.set_title(r'Log $\rho$')
# Code to plot arrows. --> Spacing between the arrow can be adjusted by
# modifying the newdims tuple of conrid function.
T = pp.Tools()
newdims = 2*(20,)
R,Z,SphData = I.getSphData(D,w_dir=wdir,datatype='vtk',rphi=True,x2cut=24)
xcong = T.congrid(R,newdims,method='linear')
ycong = T.congrid(Z,newdims,method='linear')
vel1 = SphData['v1c']
vel2 = SphData['v3c']
xveccong = T.congrid(vel1,newdims,method='linear')
yveccong = T.congrid(vel2,newdims,method='linear')
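# Normalise the regridded (v1, v3) velocity components to unit length so the
# quiver plot below shows flow direction only, not magnitude.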
normVp = sqrt(xveccong**2 + yveccong**2)
xveccong = xveccong/normVp
yveccong = yveccong/normVp
ax2.quiver(xcong, ycong, xveccong, yveccong,color='w')
show()
| gpl-2.0 |
benneely/qdact-basic-analysis | notebooks/python_scripts/07_consultloc_tables.py | 1 | 6839 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 14:15:45 2016
@author: nn31
"""
import pandas as pd
import pickle
import re
import numpy as np
from robocomp import Model
from scipy import stats
import scipy
#read in cmmi data
cmmi = pd.read_csv("/Volumes/DCCRP_projects/CMMI/data/QDACT 05-03-2016.csv",
parse_dates=['AssessmentDate','AdmissionDate','DischargeDate','PalliativeDischargeDate'])
cmmi.sort_values(by=['internalid','AssessmentDate'],ascending=[1,1],inplace=True)
cmmi['ConsultLoc'] = cmmi['ConsultLoc'].apply(lambda x: 3 if x==7 else x)
dd = pickle.load(open("/Volumes/Dropbox - Gmail/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/02_data_dictionary_dict.p", "rb" ))
#For this analysis we are only interested in the time to first visit, so we'll need to subset
#to that using a pandas groupby
internalIDGroup = cmmi.groupby('internalid')
#look for individuals with > 1 visits
internalIDGroup.size()[internalIDGroup.size()>1]
fv = internalIDGroup.first()
#For all of our variables, we'd like to apply the algorithms that allow us to proceed with
#analytics
missings = Model('set_missing_codes')
fv.ConsultLoc.value_counts(dropna=False)
fv.ConsultLoc = missings.scoreIt(fv.ConsultLoc.tolist())
fv.ConsultLoc.value_counts(dropna=False)
#All my row variables set to missing
rowVars = ['ESASAnxiety','ESASAppetite','ESASConstipation','ESASDepression','ESASDrowsiness',
'ESASNausea','ESASPain','ESASShortnessOfBreath','ESASTiredness','ESASWellBeing',
'PPSScore']
for x in rowVars:
fv[x] = missings.scoreIt(fv[x].tolist())
pandasDF = fv
colVar = 'ConsultLoc'
rowVar = 'ESASAnxiety'
label = 'Anxiety ahh'
# treatment as continuous
def cont(pandasDF,rowVar,colVar,label):
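    # Summarise rowVar within each level of colVar as "median (Q1-Q3) n:count"
    # strings and attach a Kruskal-Wallis p-value comparing the groups.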
grouped = pandasDF[[colVar,rowVar]].groupby(colVar)[rowVar]
groupednm = pandasDF[[colVar,rowVar]].dropna().groupby(colVar)[rowVar]
preSize = pd.DataFrame({'pre':grouped.size()})
posSize = pd.DataFrame({'post':groupednm.size()})
numbers = pd.merge(preSize,posSize,left_index=True,right_index=True)
descriptors = groupednm.quantile([.25, .5, .75]).unstack()
output = pd.merge(numbers,descriptors,left_index=True,right_index=True)
values_per_group = [col for col_name, col in groupednm]
results = stats.kruskal(*values_per_group)
output['label'] = output[[0.5,0.25,0.75,'post']].apply(lambda x : str(x[0]) +
' (' + str(x[1]) + '-' + str(x[2]) + ') n:' + str(x[3]), axis=1)
final = pd.DataFrame({label:output['label']}).transpose()
final['pvalue'] = results.pvalue
return(final)
x1 = cont(fv,'ESASAnxiety', 'ConsultLoc',label='Anxiety - Continuous')
x2 = cont(fv,'ESASAppetite', 'ConsultLoc',label='Appetite - Continuous')
x3 = cont(fv,'ESASConstipation', 'ConsultLoc',label='Constipation - Continuous')
x4 = cont(fv,'ESASDepression', 'ConsultLoc',label='Depression - Continuous')
x5 = cont(fv,'ESASDrowsiness', 'ConsultLoc',label='Drowsiness - Continuous')
x6 = cont(fv,'ESASNausea', 'ConsultLoc',label='Nausea - Continuous')
x7 = cont(fv,'ESASPain', 'ConsultLoc',label='Pain - Continuous')
x8 = cont(fv,'ESASShortnessOfBreath','ConsultLoc',label='Shortness - Continuous')
x9 = cont(fv,'ESASTiredness', 'ConsultLoc',label='Tiredness - Continuous')
x10 = cont(fv,'ESASWellBeing', 'ConsultLoc',label='Well Being - Continuous')
x11 = cont(fv,'PPSScore', 'ConsultLoc',label='PPSScore - Continuous')
frames = [x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11]
table = pd.concat(frames)
#Now, let's change the column headers
new_header = table.columns.tolist()
for i,x in enumerate(new_header):
if x in dd.get('ConsultLoc').get('codes'):
indx = dd.get('ConsultLoc').get('codes').index(x)
new_header[i] = dd.get('ConsultLoc').get('codes_fmt')[indx]
table.columns = new_header
table.to_csv("/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/07_ConsultLoc_tables.csv")
#Treatment as Categorical Mild / Moderate Severe
#We now need our mild / moderate phenotypes let's create that object for use:
mild_moderate = Model('mod_sev_symptoms')
for x in rowVars:
fv[x+'m_ms'] = mild_moderate.scoreIt(fv[x].tolist())
pandasDF = fv
colVar = 'ConsultLoc'
rowVar = 'ESASAnxietym_ms'
label = 'Anxiety ahh'
def cat(pandasDF,rowVar,colVar,label,displayRowText=None):
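    # Cross-tabulate rowVar by colVar and report "count/total (percent)" for a
    # single row level (the first one unless displayRowText is given), together
    # with a chi-square p-value for the full table.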
cross = pd.crosstab(pandasDF[rowVar], pandasDF[colVar]).transpose()
cross_pct = cross.apply(lambda r: r/r.sum(), axis=1)
col_tot = cross.apply(lambda r: r.sum(), axis=1)
merge1 = pd.merge(cross,cross_pct,left_index=True,right_index=True)
pval = scipy.stats.chi2_contingency(cross)[1]
all_dat = pd.merge(merge1,pd.DataFrame({'tot':col_tot}),left_index=True,right_index=True)
if displayRowText is None:
show = cross.columns.tolist()[0]
else:
show = displayRowText
if show not in cross.columns.tolist():
raise ValueError("cat trying to limit to data not available")
temp = all_dat[[show + '_x',show + '_y','tot']]
output = pd.DataFrame({label + ' ' + show:temp.apply(lambda x: str("{0:.0f}".format(x[0])) + '/' + str("{0:.0f}".format(x[2])) + ' (' + "{0:.2f}%".format(x[1] * 100) + ')',axis=1 )}).transpose()
output['pvalue'] = pval
return(output)
x1 = cat(fv,'ESASAnxietym_ms', 'ConsultLoc',label='Anxiety')
x2 = cat(fv,'ESASAppetitem_ms', 'ConsultLoc',label='Appetite')
x3 = cat(fv,'ESASConstipationm_ms', 'ConsultLoc',label='Constipation')
x4 = cat(fv,'ESASDepressionm_ms', 'ConsultLoc',label='Depression')
x5 = cat(fv,'ESASDrowsinessm_ms', 'ConsultLoc',label='Drowsiness')
x6 = cat(fv,'ESASNauseam_ms', 'ConsultLoc',label='Nausea')
x7 = cat(fv,'ESASPainm_ms', 'ConsultLoc',label='Pain')
x8 = cat(fv,'ESASShortnessOfBreathm_ms','ConsultLoc',label='Shortness')
x9 = cat(fv,'ESASTirednessm_ms', 'ConsultLoc',label='Tiredness')
x10 = cat(fv,'ESASWellBeingm_ms', 'ConsultLoc',label='Well Being')
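# Note: x11 below is the continuous PPSScore summary carried over from the
# table above; PPSScore is not recoded into mild/moderate-severe categories.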
frames = [x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11]
table = pd.concat(frames)
#Now, let's change the column headers
new_header = table.columns.tolist()
for i,x in enumerate(new_header):
if x in dd.get('ConsultLoc').get('codes'):
indx = dd.get('ConsultLoc').get('codes').index(x)
new_header[i] = dd.get('ConsultLoc').get('codes_fmt')[indx]
table.columns = new_header
table.to_csv("/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/07_ConsultLoc_tables_cat.csv")
# Scratch check: round-trip a datetime through numpy datetime64 / epoch seconds.
from datetime import datetime
import numpy as np
dt = datetime.utcnow()
dt64 = np.datetime64(dt)
ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
roundtrip = datetime.utcfromtimestamp(ts)
print(dt, dt64, roundtrip, np.__version__)
| gpl-3.0 |
antiface/mne-python | mne/decoding/tests/test_ems.py | 19 | 1969 | # Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import compute_ems
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
"""Test event-matched spatial filters"""
raw = io.Raw(raw_fname, preload=False)
# create unequal number of events
events = read_events(event_name)
events[-2, 2] = 3
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
epochs.equalize_event_counts(epochs.event_id, copy=False)
assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
surrogates, filters, conditions = compute_ems(epochs)
assert_equal(list(set(conditions)), [1, 3])
events = read_events(event_name)
event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs.equalize_event_counts(epochs.event_id, copy=False)
n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
assert_raises(ValueError, compute_ems, epochs)
surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
assert_equal(n_expected, len(surrogates))
assert_equal(n_expected, len(conditions))
assert_equal(list(set(conditions)), [2, 3])
raw.close()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/backends/backend_gtk.py | 2 | 44416 | from __future__ import division, print_function
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if sys.version_info[0] >= 3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import markers
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist.
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
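# Editor's note: the deliberately uncalled sketch below is illustrative only and
# is not part of the original backend. It shows how ordinary pyplot code reaches
# the hooks defined in this module (new_figure_manager() below creates the
# figure, draw_if_interactive() redraws it, and Show.mainloop() above enters the
# GTK main loop). It assumes an X display is available.
def _example_pyplot_session():
    """Plot a short series through the GTK backend (requires a display)."""
    import matplotlib
    matplotlib.use('GTK')            # must be selected before pyplot is imported
    import matplotlib.pyplot as plt
    plt.plot([1, 2, 3])
    plt.show()                       # dispatches to show() / gtk.main() above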
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
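# Editor's note: illustrative, uncalled sketch of the timer API documented in
# TimerGTK above; not part of the original module. It relies only on
# new_timer() (defined on FigureCanvasGTK below) and the TimerBase methods
# add_callback()/start().
def _example_timer_usage(canvas, message="timer fired"):
    """Attach a repeating 500 ms timer to an existing FigureCanvasGTK."""
    def _on_tick():
        print(message)
    timer = canvas.new_timer(interval=500)  # interval is in milliseconds
    timer.add_callback(_on_tick)            # called on every timer event
    timer.start()
    return timer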
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x,y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'],):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
        # do a synchronous draw (it's less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
        Make sure self._pixmap is at least width x height;
        create a new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
            # realize() is needed for self.window (used for the pixmap) and has
            # the side effect of altering the figure width/height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
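# Editor's note: illustrative, uncalled sketch of the export methods defined on
# FigureCanvasGTK above (print_png/print_jpeg render into a GDK pixmap and save
# through gdk.Pixbuf); not part of the original module.
def _example_export(canvas, basename='figure'):
    """Write PNG and JPEG copies of an existing canvas to the working directory."""
    canvas.print_png(basename + '.png')
    canvas.print_jpeg(basename + '.jpg')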
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
                # doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in (min(x0,x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
if _new_tooltip_api:
self.axes_toolitem.set_tooltip_text(
'Select axes that controls affect')
else:
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default is to popup menu at current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
def save_figure(self, *args):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
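# Editor's note: uncalled usage sketch for FileChooserDialog above, mirroring how
# the toolbars' get_filechooser() builds it; added for illustration only.
def _example_filechooser(canvas, parent_window):
    """Ask the user for a filename/format pair for saving the given canvas."""
    fc = FileChooserDialog(title='Save the figure',
                           parent=parent_window,
                           filetypes=canvas.get_supported_filetypes(),
                           default_filetype=canvas.get_default_filetype())
    fname, fmt = fc.get_filename_from_user()
    return fname, fmt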
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| mit |
CforED/Machine-Learning | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
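# Editor's addition (illustrative, not part of the original example): besides
# grid_scores_, the fitted RFECV object exposes the boolean support mask and a
# per-feature ranking, which make the selected subset easy to inspect.
import numpy as np
print("Selected feature indices: %s" % np.flatnonzero(rfecv.support_))
print("Feature ranking (1 = selected): %s" % rfecv.ranking_)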
| bsd-3-clause |
jusjusjus/Motiftoolbox | Fitzhugh_3-cell/webrun.py | 1 | 5211 | #!/usr/bin/env python
import matplotlib #
matplotlib.use('Agg') # This prevents the X server from starting.
import mpld3 as m
from mpld3 import plugins
from flask import Flask,request
import pylab as pl
import numpy as np
import system as sys
import info as nf
import network3N as netw
import torus as tor
import traces as tra
import fitzhugh as model
from WebSupport.Plugins.clickPlugin import ClickPlugin
from WebSupport.Plugins.dragPlugin import DragPlugin
from matplotlib.backend_bases import Event
from functools import wraps
from flask import request, Response
app = Flask(__name__)
def check_auth(username, password):
"""
This function is called to check if a username /
password combination is valid.
"""
try:
f = open('/usr/sbin/MotiftoolboxPwd', 'r')
keypair = f.readline().split(',')
f.close()
except:
keypair = ['user', 'password']
return username == keypair[0] and password == keypair[1]
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
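# Editor's note: illustrative helper only; it is not used by the app. It shows
# the HTTP Basic auth header a client would send to the @requires_auth routes
# below. The 'user'/'password' pair is the fallback hard-coded in check_auth()
# when /usr/sbin/MotiftoolboxPwd cannot be read.
def _example_basic_auth_header(username='user', password='password'):
    """Return the Authorization header Flask parses into request.authorization."""
    import base64
    token = base64.b64encode('%s:%s' % (username, password))
    return {'Authorization': 'Basic ' + token}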
info, network, system, traces, torus = None, None, None, None, None
sweepingPhasespace = False;
def initialize():
pl.close('all')
global info, network, system, traces, torus, sweepingPhasespace
reload(model)
info = nf.info()
network = netw.network(info=info)
system = sys.system(info=info, network=network)
traces = tra.traces(system, network, info=info)
torus = tor.torus(system, network, traces, info=info)
network.system = system
system.traces = traces
## customize system for web
system.setParams(epsilon=0.3)
system.ax.set_xlabel(r'Inactivation Variable')
system.ax.set_ylabel(r'Voltage Variable')
system.ax.set_title('')
system.fig.tight_layout()
plugins.connect(system.fig, DragPlugin(eventHandlerURL="updatesystem", radioButtonID="systemRadio"))
# customize network
network.ax.patch.set_facecolor('#777777')
network.moveText(2, [0.02, -0.1])
network.moveText(3, [0.02, -0.1])
network.ax.texts[6].set_text('1')
network.ax.texts[7].set_text('2')
network.ax.texts[8].set_text('3')
plugins.connect(network.fig, DragPlugin(eventHandlerURL="updatenetwork", radioButtonID="networkRadio"))
# customize traces
traces.ax.patch.set_facecolor('#777777')
traces.fig.tight_layout()
# customize torus
torus.ax_traces.set_xlabel(r'phase lag: 1-2')
torus.ax_basins.set_xlabel(r'phase lag: 1-2')
torus.ax_traces.set_ylabel(r'phase lag: 1-3')
torus.fig.tight_layout()
torus.switch_processor() # switches on the gpu if available
if torus.USE_GPU: torus.setGridsize(24)
plugins.connect(torus.fig, ClickPlugin(eventHandlerURL="updatetorus", radioButtonID="torusRadio"))
# reload timing variable
sweepingPhasespace = False;
@app.route("/reset")
@requires_auth
def reset():
initialize()
return ""
@app.route("/")
@requires_auth
def loadLayout():
f = open('fitzhugh_3cell.html','r')
HTML = f.read();
return HTML
@app.route("/system")
@requires_auth
def loadSystem():
return m.fig_to_html(system.fig)
@app.route("/updatesystem")
@requires_auth
def systemOnclick():
event = Event("mockEvent",system.fig.canvas)
event.xdata = float(request.args['startX'])
event.ydata = float(request.args['startY'])
system.on_button(event)
event.xdata = float(request.args['endX'])
event.ydata = float(request.args['endY'])
event.button = int(request.args['type'])
system.off_button(event)
return ""
@app.route("/torus")
@requires_auth
def loadTorus():
return m.fig_to_html(torus.fig)
@app.route("/updatetorus")
@requires_auth
def torusOnclick():
global sweepingPhasespace
if request.args['type'] == 'sweep':
if sweepingPhasespace == False :
sweepingPhasespace = True
torus.sweep_phase_space()
sweepingPhasespace = False
return ""
elif request.args['type'] == 'trace':
torus.click_traces(float(request.args['xval']), float(request.args['yval']))
return ""
return ""
@app.route("/torusClear")
@requires_auth
def torusClear():
torus.erase_traces()
torus.erase_basins()
return ""
@app.route("/network")
@requires_auth
def loadNetwork():
return m.fig_to_html(network.fig)
@app.route("/updatenetwork")
@requires_auth
def networkOnclick():
event = Event("mockEvent",network.fig.canvas);
event.xdata = float(request.args['startX'])
event.ydata = float(request.args['startY'])
event.button = int(request.args['type'])
network.on_button(event)
event.xdata = float(request.args['endX'])
event.ydata = float(request.args['endY'])
event.button = int(request.args['type'])
network.off_button(event)
return ""
@app.route("/traces")
@requires_auth
def loadTraces():
return m.fig_to_html(traces.fig)
@app.route("/info")
@requires_auth
def loadInfo():
return m.fig_to_html(info.fig)
if __name__ == "__main__":
initialize()
app.run(host='0.0.0.0', port=8080)
app.debug = True
| gpl-2.0 |
louispotok/pandas | pandas/tests/frame/test_replace.py | 3 | 44681 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(TestData):
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
# mixed type
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
def test_replace_regex_metachar(self):
metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
assert result.values.all()
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
pytest.raises(ValueError, df.replace, to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
# dict to scalar
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self):
# GH 15289
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
assert_frame_equal(df, df.replace({}))
assert_frame_equal(df, df.replace(Series([])))
assert_frame_equal(df, df.replace({'b': {}}))
assert_frame_equal(df, df.replace(Series({'b': {}})))
@pytest.mark.parametrize("to_replace, method, expected", [
(0, 'bfill', {'A': [1, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
(nan, 'bfill', {'A': [0, 1, 2],
'B': [5.0, 7.0, 7.0],
'C': ['a', 'b', 'c']}),
('d', 'ffill', {'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
([0, 2], 'bfill', {'A': [1, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
([1, 2], 'pad', {'A': [0, 0, 0],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
((1, 2), 'bfill', {'A': [0, 2, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
(['b', 'c'], 'ffill', {'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'a', 'a']}),
])
def test_replace_method(self, to_replace, method, expected):
# GH 19632
df = DataFrame({'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']})
result = df.replace(to_replace=to_replace, value=None, method=method)
expected = DataFrame(expected)
assert_frame_equal(result, expected)
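# A minimal standalone sketch (not part of the test suite above) summarizing the
# replace() call patterns these tests exercise: nested dicts scoped to a column,
# lists of regexes mapped to a scalar, and method-based filling. It assumes only
# pandas and numpy and is never collected or called by pytest.
def _replace_usage_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'a': list(range(4)), 'b': list('ab..')})
    # nested dict: the regex -> value mapping applies only to column 'b'
    by_column = df.replace({'b': {r'\s*\.\s*': np.nan}}, regex=True)
    # a list of regexes all replaced by a single scalar
    to_scalar = df.replace([r'\s*\.\s*', 'a|b'], np.nan, regex=True)
    # method-based fill: matches are filled from neighbouring rows, not a value
    filled = df.replace(to_replace='.', value=None, method='bfill')
    return by_column, to_scalar, filled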
| bsd-3-clause |
katyhuff/cyder | output/contour_plot.py | 1 | 10705 |
"""
Comparison of griddata and tricontour for an unstructured triangular grid.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
from pylab import *
import matplotlib.tri as tri
import numpy as np
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
import pdb
class ContourPlot(object) :
"""
A class representing a contour plot. It needs data, axis labels, a title,
etc.
"""
_x_min = -2
"""
The minimum value of the values in the x dimension
"""
_y_min = -2
"""
The minimum value of the values in the y dimension
"""
_x_max = 2
"""
The maximum value of the values in the x dimension
"""
_y_max = 2
"""
The maximum value of the values in the y dimension
"""
_x = None
"""
The x coordinates of the data points
"""
_y = None
"""
The y coordinates of the data points
"""
_z = None
"""
The z values (the quantity being contoured) at each data point
"""
_xi = None
"""
The regularly spaced grid of x values used for interpolation
"""
_yi = None
"""
The regularly spaced grid of y values used for interpolation
"""
_zi = None
"""
The z values interpolated onto the (xi, yi) grid
"""
_npts = 200
"""
The number of data points
"""
_n_labels = 15 # the number of labels on each axis
"""
The number of contour levels (labels) drawn in each plot
"""
"""
_filename = "out.eps"
"""
The filename that the figure is saved to
"""
_title = "plot_title"
"""
The title of the plot
"""
def __init__(self,
x_min = -2,
y_min = -2,
x_max = 2,
y_max = 2,
x = None,
y = None,
z = None,
ngridx = 100,
ngridy = 200,
npts = 200,
x_label = 'x',
y_label = 'y',
ptitle = 'plottitle',
fname = 'contour_plot.eps'
):
self._x_min = x_min
self._x_max = x_max
self._y_min = y_min
self._y_max = y_max
self._x=x
self._y=y
self._z=z
if self._x is None :
self._x = self.set_x()
if self._y is None :
self._y = self.set_y()
if self._z is None :
self._z = self.set_z()
if self._xi is None :
self._xi = self.set_xi(ngridx)
if self._yi is None :
self._yi = self.set_yi(ngridx, ngridy)
if self._zi is None :
self._zi = self.set_zi()
self._npts = npts
self._x_label = x_label
self._y_label = y_label
self._title = ptitle
self._filename = fname
#self.grid_data_and_contour() #change subplot defs below
self.plot_tricontour()
self.save_it()
def grid_data_and_contour(self) :
# griddata and contour.
x=self._x
y=self._y
z=self._z
n_labels=self._n_labels
xi=self._xi
yi=self._yi
zi=self._zi
x_min=self._x_min
y_min=self._y_min
x_max=self._x_max
y_max=self._y_max
plt.subplot(211)
plt.contour(xi, yi, zi, n_labels, linewidths=0.5, colors='k')
plt.contourf(xi, yi, zi, n_labels, cmap=plt.cm.rainbow,
norm=plt.normalize(vmax=abs(zi).max(), vmin=-abs(zi).max()))
plt.colorbar() # draw colorbar
plt.plot(x, y, 'ko', ms=3)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
print ('griddata and contour plotted')
def plot_tricontour(self):
# tricontour.
x=self._x
y=self._y
z=self._z
n_labels=self._n_labels
zi=self._zi
x_min=self._x_min
y_min=self._y_min
x_max=self._x_max
y_max=self._y_max
title=self._title
x_label=self._x_label
y_label=self._y_label
plt.subplot(111) # change this if you want to plot both
triang = tri.Triangulation(x, y)
plt.tricontour(x, y, z, n_labels, linewidths=0.5, colors='k')
plt.tricontourf(x, y, z, n_labels, cmap=plt.cm.rainbow,
norm=plt.normalize(vmax=abs(zi).max(), vmin=-abs(zi).max()))
plt.colorbar()
plt.plot(x, y, 'ko', ms=3)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
print ('tricontour plotted')
def save_it(self) :
savefig(self._filename)
print ('saved as '+ self._filename)
def set_x(self) :
if self._x is None :
seed(0)
# this should be a matrix of identical columns?
self._x = uniform(self._x_min, self._x_max, self._npts)
return self._x;
def set_xi(self, ngridx) :
if self._xi is None :
spacing = self._x_max/ngridx
self._xi = np.linspace(self._x_min-spacing, self._x_max+spacing, ngridx)
return self._xi
def set_y(self) :
if self._y is None :
# this should be a matrix of identical columns
self._y = uniform(self._y_min, self._y_max, self._npts)
return self._y
def set_yi(self, ngridx, ngridy) :
if self._yi is None :
spacing = self._y_max/ngridx
self._yi = np.linspace(self._y_min-spacing, self._y_max+spacing, ngridy)
return self._yi
def set_z(self) :
if self._z is None :
# this should be the mass in whatever component
self._z = self._x*np.exp(-self._x**2 - self._y**2)
return self._z;
def set_zi(self) :
if self._zi is None :
self._zi = griddata(self._x, self._y, self._z, self._xi, self._yi, interp='nn')
return self._zi
def set_title(self, npts) :
self._title = 'tricontour (%d points)' % npts
return self._title
def set_filename(self) :
self._filename = 'contour_plot.eps'
return self._filename
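# A hedged sketch of driving ContourPlot directly (ContourData below does this with
# real query results); the arrays and filenames here are made-up placeholders:
#
#   import numpy as np
#   x = np.random.uniform(-2, 2, 200)
#   y = np.random.uniform(-2, 2, 200)
#   z = x * np.exp(-x**2 - y**2)
#   ContourPlot(x=x, y=y, z=z, npts=len(z), x_label='x', y_label='y',
#               ptitle='demo contour', fname='demo_contour.eps')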
import output_tools
import numpy as np
import glob
class ContourData(object) :
"""
A class that holds a lot of data for the contour plot
"""
_data = None
"""
A 3xn numpy ndarray to hold n (x,y,z) data points
"""
_flist = []
"""
The list of sqlite files to be queried to fill the db.
n=the length of this list, unless there are duplicates
"""
_title = 'real'
_filename = 'real.eps'
_x=[]
_y=[]
_z=[]
_x_param = ''
_y_param = ''
_x_label = ''
_y_label = ''
_z_label = ''
_comp_id = 4
_npts = 0
def __init__(self,
root='deg',
xparam = 'degradation',
xlabel = 'degradation',
yparam = 'advective_velocity',
ylabel = 'advective_velocity',
zlabel = 'massKG',
ngrid=200,
title = 'Degradation Rate vs. Advective Velocity',
filename = 'deg_rate.eps'
):
self._x_param = xparam
self._x_label = xlabel
self._y_param = yparam
self._y_label = ylabel
self._z_label = zlabel
self._title = title
self._filename = str(filename)
self._flist = self.collect_filenames(root)
self.extract_data(self._flist)
ContourPlot(
x_min = min(self._x),
y_min = min(self._y),
x_max = max(self._x),
y_max = max(self._y),
x = self._x,
y = self._y,
z = self._z,
ngridx = ngrid,
ngridy = ngrid,
npts = len(self._z),
x_label = self._x_label,
y_label = self._y_label,
ptitle = self._title,
fname = self._filename
)
def extract_data(self, flist) :
for f in flist :
self.add_run(f)
def add_run(self, dbname) :
param_query = output_tools.Query(filename=dbname, queryType='nucparams')
self._x.append( self.get_x_val(param_query))
self._y.append( self.get_y_val(param_query))
param_query.execute()
vals_query = output_tools.Query(dbname, "contaminants", t0=0, tf=100)
vals_query.execute()
self._z.append( self.get_z_val(vals_query))
return self._x, self._y, self._z
def get_x_val(self, query) :
return query.get_param_val(self._comp_id, self._x_param)
def get_y_val(self, query) :
return query.get_param_val(self._comp_id, self._y_param)
def get_z_val(self, query) :
query.collapse_isos()
data = query.get_data()
comp_list=query.get_comp_list()
mass_slice = data[:,comp_list.index(self._comp_id)]
return mass_slice.max()
def collect_filenames(self, root) :
for name in glob.glob(root+'*.sqlite'):
self._flist.append(name)
print(name)
self._npts += 1
return self._flist
from argparse import ArgumentParser
def main():
arg_parser = ArgumentParser(description="Plots 2D data from the"
" contaminants table, when parameterized by data in the nucparams"
" table.")
arg_parser.add_argument("-r", metavar="root", type=str, nargs=1,
dest="root", help="This is the name root for the sqlite files to plot.")
arg_parser.add_argument("-xp", metavar="x_param", type=str, nargs=1,
dest="x_param", help="This is the x parameter name in the sqlite database.")
arg_parser.add_argument("-xl", metavar="x_label", type=str, nargs=1,
dest="x_label", help="This is the x parameter name as plotted")
arg_parser.add_argument("-yp", metavar="y_param", type=str, nargs=1,
dest="y_param", help="This is the y parameter name in the sqlite database.")
arg_parser.add_argument("-yl", metavar="y_label", type=str, nargs=1,
dest="y_label", help="This is the y label name as plotted")
arg_parser.add_argument("-zl", metavar="z_label", type=str, nargs=1,
dest="z_label", help="This is the z label name as plotted")
arg_parser.add_argument("-n", metavar="ngrid", type=int, nargs=1,
dest="ngrid", help="This number adjusts the contour grid resolution")
arg_parser.add_argument("-t", metavar="title", type=str, nargs=1,
dest="title", help="This is the title of the plot")
arg_parser.add_argument("-o", metavar="filename", type=str, nargs=1,
dest="filename", help="This is the output filename. Include eps.")
args=arg_parser.parse_args()
ContourData(
root = args.root[0],
xparam = args.x_param[0],
xlabel = args.x_label[0],
yparam = args.y_param[0],
ylabel = args.y_label[0],
zlabel = args.z_label[0],
ngrid = args.ngrid[0],
title = args.title[0],
filename = args.filename[0]
)
if __name__=="__main__" :
main()
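# A hedged example of invoking this script from the command line; the flag names come
# from the ArgumentParser above, while the specific values are hypothetical:
#
#   python contour_plot.py -r deg -xp degradation -xl "degradation rate" \
#       -yp advective_velocity -yl "advective velocity" -zl massKG \
#       -n 200 -t "Degradation Rate vs. Advective Velocity" -o deg_rate.eps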
| bsd-3-clause |
brianlorenz/COSMOS_IMACS_Redshifts | Emission_Fitting/FitEmissionOneSig.py | 1 | 38467 | #Fits an emission line with a Gaussian and returns the amplitude, standard deviation, and continuum line
#Usage: run FitEmissionOneSig.py 'a6' 6563 to fit the line at rest wavelength 6563 (Ha) for the a6 mask.
#Typing run FitEmissionOneSig.py 'a6' 'HaNII' will fit all three lines around Ha simultaneously
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
from scipy.interpolate import splrep, splev
from scipy.signal import medfilt
from scipy.optimize import curve_fit,nnls
#Location of output data file
dataout = '/Users/blorenz/COSMOS/COSMOSData/lineflux_tot.txt'
viewdataout = '/Users/blorenz/COSMOS/COSMOSData/lineflux_view.txt'
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/COSMOSData/fitEmissionOut/'
#The location with the file for all of our data
ourdatapath = '/Users/blorenz/COSMOS/COSMOSData/all_c_hasinger.txt'
#Where the calibrated spectra are stored
caldatapath = '/Users/blorenz/COSMOS/COSMOSData/flxFitsFileOut/'
#File for all of the emission/absorption features of the galaxy (to mask out other features when fitting)
linedata = '/Users/blorenz/COSMOS/COSMOSData/corFitsFileOut/galaxylines.dat'
#File for the MAD of the difference in flux of duplicates in each line (to flag low S/N lines)
maddatapath = '/Users/blorenz/COSMOS/COSMOSData/linemad.txt'
#Read in the spectral lines for masking
gallines = ascii.read(linedata).to_pandas()
#Remove all absoption lines
gallines = gallines[gallines.col2==1]
gallines = gallines.reset_index()
#Read the datafile (if there is one), then create a blank one to write to:
if os.path.exists(dataout):
outarr = ascii.read(dataout).to_pandas()
else: outarr = pd.DataFrame()
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16
#Set the letnum
letnum = sys.argv[1]
#Read in all of our data
ourdata = ascii.read(ourdatapath).to_pandas()
ourdata = ourdata[ourdata.ImageName.str.contains('feb1' + letnum[1] + '_' + letnum[0]) == True]
ourdata = ourdata[ourdata.Unsure == 0]
ourdata = ourdata[ourdata.Bad == 0]
ourdata = ourdata[ourdata.Flag3 == 0]
#ourdata = ourdata[ourdata.Flag1 == 0]
ourdata = ourdata[ourdata.Star == 0]
#Function to make the mask before the gaussian
def getMask(modelspec,sigspec,spectrum):
#Model continuum
m = modelspec
#Find all of the pixels where the flux goes to 0 or negative, and set those to 0
maskline = (spectrum > 0)
#Get the weights so we can downweight by noise
w = divz(1,sigspec)*maskline
return m,w
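#getMask returns the model continuum m unchanged and a weight array w equal to 1/noise,
#zeroed wherever the spectrum is non-positive, so those pixels carry no weight in the
#weighted fits below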
#Find the objid of every object, and it's corresponding letter number combination
#objs[0] - objid
#objs[1] - letter
#objs[2] - number
objs = [(i[4:10],i[17],i[15]) for i in ourdata.ImageName]
#Start two counters to run along the plot
plt1 = 0
plt10 = 0
plt1b = 0
plt10b = 0
#Set the gridsize, so 12 means a 12x12 grid
gridsize = 12
#Start the plot before the loop:
fig,axarr = plt.subplots(gridsize,gridsize,figsize = (100,80))
figb,axarrb = plt.subplots(gridsize,gridsize,figsize = (100,80))
#Loop the fitting over all objects
#for i in range(16,20):
for i in range(len(objs)):
#Mark the data as good
fitflag = 0 #Good data
#Set that we are not looking at the lines around Ha
HaNII = False
#Get the redshift
zcc = ourdata.iloc[i].z_cc
#Set the location of the data file
flxfits = caldatapath + 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits'
#Read in its datafile if it exists
if os.path.exists(flxfits):
flxdata = fits.open(flxfits)[0].data
flxhead = fits.open(flxfits)[0].header
#Read in the spectrum and model
spec = flxdata[0]
noise = flxdata[1] #?
model = flxdata[3]
#Calculate the wavelength range for the data
crval1 = flxhead["crval1"]
crpix1 = flxhead["crpix1"]
cdelt1 = flxhead["cdelt1"]
naxis1 = flxhead["naxis1"]
dcflag = flxhead["dc-flag"]
exptime = flxhead['exptime']
wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
#Loop over all of the emission lines to fit:
#for j in range(1, len(sys.argv)):
#Changed to only fitting one line at a time, don't want to unindent everything
if 1==1:
#line = int(sys.argv[j])
line = sys.argv[2]
#Check if we are fitting the Ha and NII lines toether:
if line == 'HaNII':
line = 6563
#Variable to know that we are fitting three lines
HaNII = True
#Dataframe that we will store everything in
HaNIIdat = pd.DataFrame()
#Set up the rest wavelengths for the lines
HaNIIdat.at[0,'restwave'] = 6548.1
HaNIIdat.at[1,'restwave'] = 6562.8
HaNIIdat.at[2,'restwave'] = 6583.0
else: line = int(line)
#Compute the wavelength of the line redshifted to the galaxy
zline = (1+zcc)*line
#Set the range over which to look for the line (in angstroms, each pixel is 2A)
srange = 50
#Set the short range to try to find the peak
shrange = 6
#Find the indices to crop the spectra around the line
idx = np.logical_and(wavelength > zline-srange, wavelength < zline+srange)
idx2 = np.logical_and(wavelength > zline-shrange, wavelength < zline+shrange)
#Special case for OII doublet if it isn't redshifted into view:
if zline < 4910:
idx = np.arange(0,srange)
idx2 = np.arange(0,shrange)
fitflag = 5 #Flagged for not in view
#Crop the spectrum to the proper range
waveline = wavelength[idx]
specline = spec[idx]
shspecline = spec[idx2]
modelline = model[idx]
noiseline = noise[idx]
shnoiseline = noise[idx2]
#Redshift the lines to the current galaxy
zgallines = gallines.col1*(1+zcc)
#Mask out the spectral lines with this function
#data - the data to mask out
#line - the line to keep (others are masked)
def droplines(wavedrop=waveline,specdrop=specline,modeldrop=modelline,noisedrop = noiseline,zline=zline,peakwave=0,zcc=zcc,HaNII = HaNII):
#Mark that we plot the dropped region
pdrop = 1
#We first find the line that you are fitting so we don't mask it
#Compute the differenc between the current line and every line in the data
linediff = zgallines - zline
#Find the index of the closest value to 0. There may be negatives
closelineidx = np.abs(linediff).idxmin()
#Save the name of the line for later
linename = gallines.iloc[closelineidx].col3
restwave = gallines.iloc[closelineidx].col1
#Drop the closest line from the table so that we mask the others
otherlines = zgallines.drop(closelineidx)
#Special case for OII doublet, since it should find 3726.2, then also drop 3728.9
if linename == '[OII]':
otherlines = otherlines.drop(closelineidx+1)
restwave = 3727
#Special case for Ha three lines, since it should find Ha, then also drop NII on either side of it
if HaNII:
otherlines = otherlines.drop(closelineidx-1)
otherlines = otherlines.drop(closelineidx+1)
#Find the other lines that are around the current line, as integers
rounded = [np.round(i) for i in otherlines if (i > zline-srange and i < zline+srange)]
#Make them even if they are odd to match up with wavelengths
centers = [int(i)+(int(i)&1) for i in rounded]
#Find offset from expected
lineval = gallines.iloc[closelineidx].col1
zlineval = lineval*(1+zcc)
if peakwave:
waveoffset = peakwave-zline
#Round it and make it even
waveoffset = np.floor(waveoffset)
waveoffset = int(waveoffset)+(int(waveoffset)&1)
centers = [i+waveoffset for i in centers]
#Arrays for the pixels on either side of each center
centerrange = [np.arange(i-shrange,i+shrange+2,2) for i in centers]
#Find the indices where the arrays match (we will drop these)
dropidx = [np.nonzero(np.in1d(wavedrop,i))[0] for i in centerrange]
#Save this version for plotting
pdropidx = dropidx
#Drop the values at those indices from both wavelength and spectrum
#Fixes a bug when they are not the same length -happens if line is on an edge
if len(dropidx) == 2:
dropidx = np.append(dropidx[0],dropidx[1])
elif not dropidx:
#Variable to say whether or not to plot the dropidx
pdrop = 0
#Drop the lines
newwave = np.delete(wavedrop,dropidx)
newspec = np.delete(specdrop,dropidx)
newmodel = np.delete(modeldrop,dropidx)
newnoise = np.delete(noisedrop,dropidx)
return newwave,newspec,newmodel,newnoise,dropidx,linename,restwave,pdropidx,pdrop
#Mask the other emission lines
dropwaveline,dropspecline,dropmodelline,dropnoiseline,dropidx,linename,restwave,pdropidx,pdrop = droplines()
m,w = getMask(dropmodelline, dropnoiseline, dropspecline)
#Model continuum
#m = dropmodelline
#Get the weights so we can downweight by noise
#w = divz(1,dropnoiseline)
#Set up Gaussian Function
#mu - mean value of the gaussian
#sigma - standard deviation
def gauss3(x, mu, sigma):
A,B = amp3(x,mu,sigma)
g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
s = A*g + B*m
return s
#A is area under Gauss curve, B is the scale factor of the continuum
def amp3(x, mu, sigma):
g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
A,B = nnls(np.transpose([g,m])*w[::,np.newaxis],dropspecline*w)[0]
return A,B
def gaussHa(x, z, sigma):
A48,A63,A83,B = ampHa(x, z, sigma)
g48 = np.exp(-0.5*(x-(6548.1*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
g63 = np.exp(-0.5*(x-(6562.8*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
g83 = np.exp(-0.5*(x-(6583.0*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
s = A48*g48 + A63*g63 + A83*g83 + B*m
return s
#A is area under Gauss curve, B is the scale factor of the continuum
def ampHa(x, z, sigma):
g48 = np.exp(-0.5*(x-(6548.1*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
g63 = np.exp(-0.5*(x-(6562.8*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
g83 = np.exp(-0.5*(x-(6583.0*(1+z)))**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2)
A48,A63,A83,B = nnls(np.transpose([g48,g63,g83,m])*w[::,np.newaxis],dropspecline*w)[0]
return A48,A63,A83,B
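#Note on the fitting scheme: only the nonlinear parameters (the mean or redshift, and
#log-sigma) are handed to curve_fit; for each trial of those, the line area(s) A and
#continuum scale B are solved linearly with non-negative least squares (nnls) against
#the noise-weighted spectrum. gaussHa ties the three Ha/NII lines to one redshift and
#one shared width, so only two nonlinear parameters are ever fit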
###Set initial guess parameters
#find the highest peak, get the wavelength value of it
#Index of highest peak
pkidx = np.argmax(shspecline)+srange/2-shrange/2
#Wavelength of peak
peakwave = waveline[pkidx]
guess3 = (peakwave,np.log(2))
guesscurve3 = gauss3(dropwaveline,guess3[0],guess3[1])
#Set the bounds, from expected position of the line +- 4 pixels, and sigma from 2 to 10
bounds3 = ([restwave*(1+zcc)-8,np.log(2)],[restwave*(1+zcc)+8,np.log(10)])
#Special case for OII doublet
if linename == 'O[II]':
guess3 = (peakwave,np.log(4))
guesscurve3 = gauss3(dropwaveline,guess3[0],guess3[1])
#Set the bounds
bounds3 = ([restwave*(1+zcc)-8,np.log(2)],[restwave*(1+zcc)+8,np.log(15)])
#Special case for Ha lines, need to set for all three gaussians
if HaNII:
guessHa = (zcc,np.log(2))
guesscurveHa = gaussHa(dropwaveline,guessHa[0],guessHa[1])
boundsHa = ([zcc-0.0012,np.log(2)],[zcc+0.0012,np.log(10)])
#Check if there is a lot of bad data
if np.count_nonzero(~np.isnan(specline)):
try:
#Fit the Gaussian
#coeff3, var_matrix3 = curve_fit(gauss3, waveline, specline, p0=guess3, bounds=bounds3)
if not HaNII:
coeff3, var_matrix3 = curve_fit(gauss3, dropwaveline, dropspecline, p0=guess3, bounds=bounds3)
else:
coeffHa, var_matrixHa = curve_fit(gaussHa, dropwaveline, dropspecline, p0=guessHa, bounds=boundsHa)
#Fit again with a proper mask
#Mask the other emission lines
if not HaNII:
peakwave = coeff3[0]
dropwaveline,dropspecline,dropmodelline,dropnoiseline,dropidx,linename,restwave,pdropidx,pdrop = droplines(peakwave=peakwave)
guess3 = (peakwave,coeff3[1])
#Redefine the gauss functions since now the model and noise have changed
m,w = getMask(dropmodelline, dropnoiseline, dropspecline)
#Model continuum
#m = dropmodelline
#Get the weights so we can downweight by noise
#w = divz(1,dropnoiseline)
#Set up Gaussian Function
#mu - mean value of the gaussian
#sigma - log(standard deviation)
def gauss3(x, mu, sigma):
A,B = amp3(x,mu,sigma)
g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
s = A*g + B*m
return s
#A is area under Gauss curve, B is the scale factor of the continuum
def amp3(x, mu, sigma):
g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
A,B = nnls(np.transpose([g,m])*w[::,np.newaxis],dropspecline*w)[0]
return A,B
#Only fit if you're not doing HaNII, otherwise nothing is masked so we don't need to fit again
if not HaNII:
coeff3, var_matrix3 = curve_fit(gauss3, dropwaveline, dropspecline, p0=guess3, bounds=bounds3)
#Compute the values of the fit
if not HaNII:
gausscurve3 = gauss3(dropwaveline,coeff3[0],coeff3[1]) #
amp3 = amp3(dropwaveline,coeff3[0],coeff3[1]) #
mu3 = coeff3[0]
stddev3 = np.e**np.abs(coeff3[1])
flux3 = amp3[0]
scale3 = amp3[1]
else:
gausscurveHa = gaussHa(dropwaveline,coeffHa[0],coeffHa[1])
ampHa = ampHa(dropwaveline,coeffHa[0],coeffHa[1])
#Fit redshift
zgauss = coeffHa[0]
#Mean of each line
for num in np.arange(0,3):
HaNIIdat.at[num,'mu'] = HaNIIdat.iloc[num]['restwave']*(1+zgauss)
HaNIIdat.at[num,'sig'] = np.e**np.abs(coeffHa[1])
HaNIIdat.at[num,'flux'] = ampHa[num]
HaNIIdat.at[num,'scale'] = ampHa[3]
mu3 = HaNIIdat.iloc[1]['mu']
stddev3 = HaNIIdat.iloc[1]['sig']
flux3 = HaNIIdat.iloc[1]['flux']
scale3 = HaNIIdat.iloc[1]['scale']
#Compute chi^2 statistics in the range of the line
if not HaNII:
#Degrees of freedom: mu, sigma, area, scale
dof = 4
#Set the lower and upper bounds for the region to find chi2
chilb = mu3-2*stddev3
chiub = mu3+2*stddev3
#Get only the indices in that region
cidx = np.logical_and(dropwaveline > chilb-2, dropwaveline < chiub+2)
arrchi2 = divz((dropspecline[cidx]-gausscurve3[cidx]),dropnoiseline[cidx])**2
chi2 = np.add.reduce(arrchi2)
rchi2 = divz(chi2,len(dropwaveline[cidx])-dof)
#Compute the sum of the fluxes in the line in the same region
sumflux = 2*np.add.reduce(dropspecline[cidx]-dropmodelline[cidx])
else:
#Degrees of freedom: z, scale, sigma (x3, for each line), area (x3, for each line)
dof = 6
cidxarr = []
#Set the lower and upper bounds for the region to find chi2
for num in np.arange(0,3):
HaNIIdat.at[num,'chilb'] = (1+zgauss)*HaNIIdat.iloc[num]['restwave']-2*HaNIIdat.iloc[num]['sig']
HaNIIdat.at[num,'chiub'] = (1+zgauss)*HaNIIdat.iloc[num]['restwave']+2*HaNIIdat.iloc[num]['sig']
cidxarr.append(np.logical_and(dropwaveline > HaNIIdat.iloc[num]['chilb']-2, dropwaveline < HaNIIdat.iloc[num]['chiub']+2))
#Chi2 just in this line
arrchi2 = divz((dropspecline[cidxarr[num]]-gausscurveHa[cidxarr[num]]),dropnoiseline[cidxarr[num]])**2
HaNIIdat.at[num,'chi2'] = np.add.reduce(arrchi2)
HaNIIdat.at[num,'rchi2'] = divz(HaNIIdat.iloc[num]['chi2'],len(dropwaveline[cidxarr[num]])-4)
#Compute the sum of the fluxes in the line in the same region
HaNIIdat.at[num,'sumflux'] = 2*np.add.reduce(dropspecline[cidxarr[num]]-dropmodelline[cidxarr[num]])
zrestline = HaNIIdat.iloc[num]['restwave']*(1+zcc)
idx3 = np.logical_and(waveline > zrestline-shrange, waveline < zrestline+shrange)
HaNIIdat.at[num,'usig'] = np.sqrt(np.add.reduce(noiseline[idx3]**2))
#wsig for each line
#Masks out the other two lines; %3 is mod 3
for num in np.arange(0,3):
wsigidx = np.logical_not(np.logical_or(cidxarr[(num+1)%3],cidxarr[(num+2)%3]))
g = np.exp(-0.5*(dropwaveline[wsigidx]-HaNIIdat.iloc[num]['mu'])**2/HaNIIdat.iloc[num]['sig']**2)/np.sqrt(2*np.pi*HaNIIdat.iloc[num]['sig']**2)
HaNIIdat.at[num,'wsig'] = np.sqrt(np.sum(g*(dropnoiseline[wsigidx]**2))*np.sqrt(2*np.pi*(HaNIIdat.iloc[num]['sig']**2)))
#Chi2 over the whole region
cidxtot = np.logical_or(np.logical_or(cidxarr[0],cidxarr[1]),cidxarr[2])
arrchi2tot = divz((dropspecline[cidxtot]-gausscurveHa[cidxtot]),dropnoiseline[cidxtot])**2
chi2tot = np.add.reduce(arrchi2tot)
rchi2tot = divz(chi2tot,len(dropwaveline[cidxtot])-dof)
#Now compute the weigthed error
#Gaussian curve with area=1
if not HaNII:
g = np.exp(-0.5*(dropwaveline-mu3)**2/stddev3**2)/np.sqrt(2*np.pi*stddev3**2) #NORMALIZED GAUSSIAN
wsig = np.sqrt(np.sum(g*(dropnoiseline**2))*np.sqrt(2*np.pi*(stddev3**2)))
usig = np.sqrt(np.add.reduce(shnoiseline**2))
#Get the string of the nearest wavelength to the line. Used for saving everything
linestr = (str(int(np.round(restwave))))
else:
wsig = HaNIIdat.iloc[1]['wsig']
usig = HaNIIdat.iloc[1]['usig']
linestr = 'HaNII'
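#wsig weights the squared noise by a unit-area Gaussian of the fitted width (an
#effective uncertainty on the line flux), while usig is the straight quadrature sum of
#the noise over the +/- shrange window around the expected line center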
###Set flags
#Make sure the flag isn't 5 (out of view); if it is, don't apply any other flags
if fitflag ==5:
pass
#Check if more than a third of the spectrum is masked - if so, throw it out
elif (len(np.where(w<=0)[0])>(len(dropwaveline)/3)):
fitflag = 1 #Marks bad data
#Check if the width of the line hit the bounds
elif (stddev3 > 7.0):
fitflag = 2 #Marks bad sigma
#Check the flag for each line when fitting HaNII
if HaNII:
for num in np.arange(0,3):
if fitflag == 1: HaNIIdat.at[num,'flag'] = 1
elif (HaNIIdat.iloc[num]['sig'] > 7.0):
HaNIIdat.at[num,'flag'] = 2
else:
HaNIIdat.at[num,'flag'] = 0
def mkplot(plt10,plt1,plt10b,plt1b,gridsize):
#Create the plot
#fig,ax0 = plt.subplots(figsize = (13,7))
#Set the axis to the correct number - check if it is flagged or not
if fitflag:
ax0 = axarrb[plt10b,plt1b]
#Increment the counters for next time
plt1b = plt1b + 1
if plt1b == gridsize:
plt1b = 0
plt10b = plt10b + 1
else:
ax0 = axarr[plt10,plt1]
#Increment the counters for next time
plt1 = plt1 + 1
if plt1 == gridsize:
plt1 = 0
plt10 = plt10 + 1
#Plotting
ax0.plot(waveline,specline,color='cornflowerblue',label='Spectrum')
#ax0.plot(dropwaveline,dropspecline,color='darkblue',label='Masked Spectrum')
#This will break if one of the lines has an empty array, the except statement fixes it. This is only for plotting
if pdrop:
if dropidx[0].size > 0:
try: [ax0.axvspan(np.min(waveline[j]),np.max(waveline[j]), color='indianred', alpha=0.1) for j in pdropidx]
except: [ax0.axvspan(np.min(waveline[j]),np.max(waveline[j]), color='indianred', alpha=0.1) for j in dropidx]
#Check if any weights were set to 0 - if so, plot the mask for those
if np.where(w<=0)[0].any():
[ax0.plot(dropwaveline[j],dropspecline[j], marker='o', color='red', alpha=0.7) for j in np.where(w<=0)[0]]
#Plot the region over which we fit chi2
if not HaNII:
ax0.axvspan(np.min(dropwaveline[cidx]),np.max(dropwaveline[cidx]), color='grey', alpha=0.2, label='chi2 region')
else:
pass
#[ax0.axvspan(np.min(dropwaveline[cidxarr[num]]),np.max(dropwaveline[cidxarr[num]]), color='grey', alpha=0.2, label='chi2 region') for num in np.arange(0,3)]
ax0.plot(waveline,modelline*scale3,color='red',label='Model')
#ax0.plot(waveline,modelline,color='red',ls='--')
#ax0.plot(dropwaveline,guesscurve3,color='orange',label='Initial Guess')
ax0.plot(dropwaveline,dropnoiseline,color='orange',label='Noise')
#ax0.plot(((1+zcc)*6562.8,(1+zcc)*6562.8),(-1000,100000),label='Expected Mean',ls='--',color='red')
#Titles, axes, legends
#ax0.set_title('H$\\alpha$, OBJID ' + objs[i][0] + '_' + objs[i][1] + objs[i][2] + ', z=' + str(np.around(zcc,4)),fontsize = titlefont)
ax0.set_ylim(-0.01,np.max(gausscurveHa)*1.1)
ax0.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
ax0.set_ylabel('Flux ($10^{-17}$ erg/s/${cm}^2/\AA$)',fontsize = axisfont)
ax0.tick_params(labelsize = ticksize)
#ax0.set_ylim(-0.07,0.75)
return ax0, plt10, plt1, plt10b, plt1b
ax0,plt10,plt1,plt10b,plt1b = mkplot(plt10,plt1,plt10b,plt1b,gridsize)
if not HaNII:
ax0.text(0.02,0.95,'Mean: ' + str(round(mu3,2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.90,'Std Dev: ' + str(round(stddev3,2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.85,'Scale: ' + str(round(amp3[1],2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.80,'Flux: ' + str(round(amp3[0],2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.75,'Sumflux: ' + str(round(sumflux,2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.70,'Chi2: ' + str(round(chi2,2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.65,'rChi2: ' + str(round(rchi2,2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.60,'wsig: ' + str(round(wsig,3)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.55,'usig: ' + str(round(usig,3)),fontsize = textfont, transform=ax0.transAxes)
else:
ax0.text(0.02,0.95, 'z: ' + str(round(zcc,4)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.90,'Mean: ' + str(round(HaNIIdat.iloc[1]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.24,0.95, str(round(HaNIIdat.iloc[1]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.40,0.95, str(round(HaNIIdat.iloc[2]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.85,'Width: ' + str(round(HaNIIdat.iloc[1]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.24,0.90, str(round(HaNIIdat.iloc[1]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.40,0.90, str(round(HaNIIdat.iloc[2]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.80,'Flux: ' + str(round(HaNIIdat.iloc[1]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.24,0.85, str(round(HaNIIdat.iloc[1]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.40,0.85, str(round(HaNIIdat.iloc[2]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.02,0.80,'Flag: ' + str(int(HaNIIdat.iloc[0]['flag'])),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.15,0.80, str(int(HaNIIdat.iloc[1]['flag'])),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.28,0.80, str(int(HaNIIdat.iloc[2]['flag'])),fontsize = textfont, transform=ax0.transAxes)
ax0.text(0.02,0.75, 'Scale: ' + str(round(HaNIIdat.iloc[2]['scale'],2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.02,0.70, 'zfit: ' + str(round(zgauss,4)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.02,0.65, 'chi2tot: ' + str(round(chi2tot,2)),fontsize = textfont, transform=ax0.transAxes)
#ax0.text(0.02,0.60, 'rchi2tot: ' + str(round(rchi2tot,2)),fontsize = textfont, transform=ax0.transAxes)
if fitflag:
ax0.text(0.02,0.50,'flag: ' + str(fitflag),fontsize = textfont, transform=ax0.transAxes)
#fig.text(0.14,0.60,'Redshift: ' + str(round(zcc,4)),fontsize = textfont)
#fig.text(0.14,0.60,'Luminosity (erg/s): ' + str(round(lumin,2)),fontsize = textfont)
if not HaNII:
ax0.plot(dropwaveline,gausscurve3,color='black',label='Gaussian fit')
else:
ax0.plot(dropwaveline,gausscurveHa,color='black',label='Gaussian fit')
#plt.show()
#Store the results to the output array:
#First we find the index with a matching objid
#midx = np.where((outarr.OBJID.astype(float)-float(objs[i][0])==0) and (outarr.Mask == (objs[i][1]+objs[i][2])))[0]
#Get the array of trues and falses where the OBJID and mask both match
tfarr = (outarr.OBJID.astype(float)-float(objs[i][0])==0) & (outarr.Mask == (objs[i][1]+objs[i][2]))
#Get the index of the matching element
midx = outarr.index[tfarr]
#We make sure outarr has correct column types
if os.path.exists(dataout):
#outarr.OBJID = outarr.OBJID.astype(str)
outarr.Mask = outarr.Mask.astype(str)
outarr.fluxfile = outarr.fluxfile.astype(str)
#We check to make sure there is only one.
#If there are none, we append a new row onto outarr
if len(midx)>1:
print('Error, check text document for duplicates')
elif len(midx)==0:
#Makes the index the length of the array, which will add a new row at the bottom
midx = len(outarr)
#Store the info that doesn't change
outarr.at[midx,'OBJID'] = objs[i][0]
outarr.at[midx,'Mask'] = objs[i][1]+objs[i][2]
outarr.at[midx,'fluxfile'] = 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits'
outarr.at[midx,'zcc'] = zcc
#Write in the new info from the fit. outarr.at auto generates new columns if needed
if not HaNII:
outarr.at[midx,linestr + '_mean'] = mu3
outarr.at[midx,linestr + '_stddev'] = stddev3
outarr.at[midx,linestr + '_flux'] = flux3
outarr.at[midx,linestr + '_scale'] = scale3
outarr.at[midx,linestr + '_chi2'] = chi2
outarr.at[midx,linestr + '_rchi2'] = rchi2
outarr.at[midx,linestr + '_sumflux'] = sumflux
outarr.at[midx,linestr + '_wsig'] = wsig
outarr.at[midx,linestr + '_usig'] = usig
outarr.at[midx,linestr + '_flag'] = fitflag
else:
linearr = ['6548','6563','6583']
counter = 0
for linestr in linearr:
outarr.at[midx,linestr + '_fix_mean'] = HaNIIdat.iloc[counter]['mu']
outarr.at[midx,linestr + '_fix_stddev'] = HaNIIdat.iloc[counter]['sig']
outarr.at[midx,linestr + '_fix_flux'] = HaNIIdat.iloc[counter]['flux']
outarr.at[midx,linestr + '_fix_scale'] = HaNIIdat.iloc[counter]['scale']
outarr.at[midx,linestr + '_fix_chi2'] = HaNIIdat.iloc[counter]['chi2']
outarr.at[midx,linestr + '_fix_rchi2'] = HaNIIdat.iloc[counter]['rchi2']
outarr.at[midx,linestr + '_fix_sumflux'] = HaNIIdat.iloc[counter]['sumflux']
outarr.at[midx,linestr + '_fix_wsig'] = HaNIIdat.iloc[counter]['wsig']
outarr.at[midx,linestr + '_fix_usig'] = HaNIIdat.iloc[counter]['usig']
outarr.at[midx,linestr + '_fix_flag'] = HaNIIdat.iloc[counter]['flag']
counter = counter + 1
outarr.at[midx,'6563_fix_chi2tot'] = chi2tot
outarr.at[midx,'6563_fix_rchi2tot'] = rchi2tot
outarr.at[midx,'6563_fix_zgauss'] = zgauss
'''
Flag values:
1 - too many zeros, we threw out the fit
2 - sigma > 7, so the fitted width is suspiciously large (near the fit bounds)
4 - scale >1.3 or <0.7, probably something wrong with spectrum in the region
5 - the line is not redshifted enough to be in view (e.g. 3727 OII)
'''
except (RuntimeError):
ax0.text(0.14,0.84,'Fitting Failed',fontsize = textfont, transform=ax0.transAxes)
#plt.show()
else: print('Bad data at ' + str(line) + ', too many NaN. ' + 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits' )
ax0.legend(fontsize = legendfont,loc=1)
#If not, give an error but continue
else: print('Could not read file ' + flxfits)
###Editing the datafile
#Sort by OBJID
outarr = outarr.sort_values('OBJID')
#Sort the columns so the lines are next to each other
outarr = outarr.reindex(sorted(outarr.columns), axis=1)
#Remove all NaN and replace them with -99
outarr = outarr.fillna(value = -99.999999999999)
#Remove columns with this, then take it back out
#outarr = outarr.drop('Ha_chi2',axis=1)
'''
for linestr in linearr:
fluxdata = fluxdata.rename(columns={linestr + '_mean_fix':linestr + '_fix_mean'})
fluxdata = fluxdata.rename(columns={linestr + '_stddev_fix':linestr + '_fix_stddev'})
fluxdata = fluxdata.rename(columns={linestr + '_flux_fix':linestr + '_fix_flux'})
fluxdata = fluxdata.rename(columns={linestr + '_scale_fix':linestr + '_fix_scale'})
fluxdata = fluxdata.rename(columns={linestr + '_chi2_fix':linestr + '_fix_chi2'})
fluxdata = fluxdata.rename(columns={linestr + '_rchi2_fix':linestr + '_fix_rchi2'})
fluxdata = fluxdata.rename(columns={linestr + '_sumflux_fix':linestr + '_fix_sumflux'})
fluxdata = fluxdata.rename(columns={linestr + '_wsig_fix':linestr + '_fix_wsig'})
fluxdata = fluxdata.rename(columns={linestr + '_usig_fix':linestr + '_fix_usig'})
fluxdata = fluxdata.rename(columns={linestr + '_flag_fix':linestr + '_fix_flag'})
fluxdata = fluxdata.rename(columns={'6563_chi2tot_fix':'6563_fix_chi2tot'})
fluxdata = fluxdata.rename(columns={'6563_rchi2tot_fix':'6563_fix_rchi2tot'})
fluxdata = fluxdata.rename(columns={'6563_zgauss_fix':'6563_fix_zgauss'})
'''
#Write the file
outarr.to_csv(dataout,index=False)
#Save the figure
#plt.show()
fig.tight_layout()
figb.tight_layout()
if HaNII: linename = 'HaNII'
fig.savefig(figout + str(int(np.round(restwave))) + '_' + linename + '_' + letnum + '_1sig.pdf')
figb.savefig(figout + str(int(np.round(restwave))) + '_' + linename + '_' + letnum + '_flagged_1sig.pdf')
plt.close(fig)
plt.close(figb)
| mit |
daodaoliang/bokeh | examples/plotting/server/boxplot.py | 42 | 2372 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_server
# Generate some synthetic time series for six different categories
cats = list("abcdef")
data = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
data[g == l] += i // 2
df = pd.DataFrame(dict(score=data, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
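# The 1.5*IQR offsets are the conventional Tukey fences: points outside
# [q1 - 1.5*IQR, q3 + 1.5*IQR] are treated as outliers below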
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
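# out is a Series of outlier scores indexed by (group, original row index);
# out.loc[cat] below pulls the outliers for a single category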
# Prepare outlier data for plotting; we need a coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_server('boxplot')
p = figure(tools="previewsave", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
winklerand/pandas | pandas/tests/tseries/offsets/test_offsets.py | 1 | 131710 | import os
from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
import pytest
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.series import Series
from pandas.tseries.frequencies import (_offset_map, get_freq_code,
_get_freq_str, _INVALID_FREQ_ERROR,
get_offset, get_standard_freq)
from pandas.core.indexes.datetimes import (
_to_m8, DatetimeIndex, _daterange_cache)
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset
from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd,
BusinessHour, WeekOfMonth, CBMonthEnd,
CustomBusinessHour,
CBMonthBegin, BYearEnd, MonthEnd,
MonthBegin, SemiMonthBegin, SemiMonthEnd,
BYearBegin, QuarterBegin, BQuarterBegin,
BMonthBegin, DateOffset, Week, YearBegin,
YearEnd, Day,
QuarterEnd, BusinessMonthEnd, FY5253,
Nano, Easter, FY5253Quarter,
LastWeekOfMonth)
from pandas.core.tools.datetimes import (
format, ole2datetime, parse_time_string,
to_datetime, DateParseError)
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas._libs.tslibs import timezones
from pandas._libs.tslib import normalize_date, NaT, Timestamp
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.tseries.holiday import USFederalHolidayCalendar
from .common import assert_offset_equal, assert_onOffset
def test_monthrange():
import calendar
for y in range(2000, 2013):
for m in range(1, 13):
assert tslib.monthrange(y, m) == calendar.monthrange(y, m)
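# A minimal illustrative helper (an addition, not part of the original suite,
# and never collected by pytest): calendar.monthrange returns a
# (weekday_of_first_day, days_in_month) tuple, and test_monthrange above
# asserts that tslib.monthrange matches it.
def _monthrange_example():
    import calendar
    # Feb 2012 starts on a Wednesday (Monday == 0) and has 29 days
    assert calendar.monthrange(2012, 2) == (2, 29)
    assert tslib.monthrange(2012, 2) == (2, 29)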
####
# Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
with pytest.raises(ValueError):
ole2datetime(60)
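# Illustrative note (an addition): OLE automation dates count days from
# 1899-12-30, which is why ole2datetime(60000) lands in April 2064 as asserted
# above, while small values such as 60 raise the ValueError checked above,
# presumably because of the ambiguity around the spreadsheet 1900 leap-year
# bug region.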
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s, errors='ignore') == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
assert isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
# DateOffset Tests
#####
class Base(object):
_offset = None
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253:
klass = klass(n=value, startingMonth=1, weekday=1,
variation='last', normalize=normalize)
elif klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self, tz):
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't even
# create the offset, skip the test
try:
if self._offset in (BusinessHour, CustomBusinessHour):
# Using 10000 in BusinessHour fails in tz check because of DST
# difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
assert isinstance(result, datetime)
assert result.tzinfo is None
# Check tz is preserved
t = Timestamp('20080101', tz=tz)
result = t + offset
assert isinstance(result, datetime)
assert t.tzinfo == result.tzinfo
except tslib.OutOfBoundsDatetime:
raise
except (ValueError, KeyError) as e:
pytest.skip(
"cannot create out_of_range offset: {0} {1}".format(
str(self).split('.')[-1], e))
class TestCommon(Base):
# expected values are created by applying the offset returned by
# Base._get_offset to 2011/01/01 09:00 (Saturday);
# used for .apply and .rollforward
expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
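# Note (an addition): the base timestamp 2011-01-01 09:00 is a Saturday,
# which is why the business-day-flavoured entries above land on Monday
# 2011-01-03, e.g.
#   >>> Timestamp('2011-01-01 09:00') + BDay() == Timestamp('2011-01-03 09:00')
#   True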
def test_return_type(self, offset_types):
offset = self._get_offset(offset_types)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
assert NaT + offset is NaT
assert offset + NaT is NaT
assert NaT - offset is NaT
assert (-offset).apply(NaT) is NaT
def test_offset_n(self, offset_types):
offset = self._get_offset(offset_types)
assert offset.n == 1
neg_offset = offset * -1
assert neg_offset.n == -1
mul_offset = offset * 3
assert mul_offset.n == 3
def test_offset_freqstr(self, offset_types):
offset = self._get_offset(offset_types)
freqstr = offset.freqstr
if freqstr not in ('<Easter>',
"<DateOffset: kwds={'days': 1}>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
assert isinstance(result, Timestamp)
assert result == expected
result = func(Timestamp(dt))
assert isinstance(result, Timestamp)
assert result == expected
# see gh-14101
exp_warning = None
ts = Timestamp(dt) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
else:
assert result == expected
if isinstance(dt, np.datetime64):
# tz handling is only exercised below when the input is a datetime or Timestamp
return
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = timezones.maybe_get_tz(tz)
dt_tz = tslib._localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
assert isinstance(result, Timestamp)
assert result == expected_localize
result = func(Timestamp(dt, tz=tz))
assert isinstance(result, Timestamp)
assert result == expected_localize
# see gh-14101
exp_warning = None
ts = Timestamp(dt, tz=tz) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected_localize + Nano(5)
else:
assert result == expected_localize
def test_apply(self, offset_types):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = self.expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset_types, 'apply', dt, expected,
normalize=True)
def test_rollforward(self, offset_types):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
'Nano', 'DateOffset']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
# but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected, normalize=True)
def test_rollback(self, offset_types):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']:
expecteds[n] = Timestamp('2011/01/01 09:00')
# but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected, normalize=True)
def test_onOffset(self, offset_types):
dt = self.expecteds[offset_types.__name__]
offset_s = self._get_offset(offset_types)
assert offset_s.onOffset(dt)
# when normalize=True, onOffset also checks that the time is 00:00:00
offset_n = self._get_offset(offset_types, normalize=True)
assert not offset_n.onOffset(dt)
if offset_types in (BusinessHour, CustomBusinessHour):
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
return
date = datetime(dt.year, dt.month, dt.day)
assert offset_n.onOffset(date)
def test_add(self, offset_types, tz):
dt = datetime(2011, 1, 1, 9, 0)
offset_s = self._get_offset(offset_types)
expected = self.expecteds[offset_types.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
# normalize=True
offset_s = self._get_offset(offset_types, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
def test_pickle_v0_15_2(self):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = os.path.join(tm.get_data_path(),
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
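# Illustrative sketch (an addition, not from the original file, and never
# collected by pytest): DateOffset takes relativedelta-style keyword
# arguments, so adding months=1 rolls the calendar month and clips to the
# last valid day, as TestDateOffset.test_constructor below also checks.
def _dateoffset_example():
    assert datetime(2008, 1, 31) + DateOffset(months=1) == datetime(2008, 2, 29)
    assert datetime(2008, 1, 2) + DateOffset(months=2) == datetime(2008, 3, 2)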
class TestDateOffset(Base):
def setup_method(self, method):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert ((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert ((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert ((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert ((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert (DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = BDay()
offset2 = BDay()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
assert repr(self.offset2) == '<2 * BusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + BDay(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + BDay(10)
def testMult2(self):
assert self.d + (-5 * BDay(-10)) == self.d + BDay(50)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((BDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BDay()
offset2 = BDay()
assert not offset1 != offset2
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# equivalent in this special case
offset = self._offset()
offset2 = self._offset()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
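# Note (an addition): offset6 and offset7 have end times earlier than their
# start times, i.e. the business "day" spans midnight, which is why their
# rollback/rollforward expectations below cross a calendar-day boundary.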
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def testEQ(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def testCall(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((BusinessHour(normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((BusinessHour(-1, normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
opening_time_cases = []
# the opening time should depend only on the sign of n, not on the
# magnitude of n or on the end time
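# Concrete illustration (an addition, taken from the first case below): for a
# default BusinessHour opening at 09:00, _next_opening_time(2014-07-01 11:00)
# is 2014-07-02 09:00 and _prev_opening_time(2014-07-01 11:00) is
# 2014-07-01 09:00.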
opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
BusinessHour(n=4), BusinessHour(end='10:00'),
BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
# if the timestamp falls exactly on the opening time, the next
# opening time is that same timestamp
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9)),
# 2014-07-05 is saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9))}))
opening_time_cases.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15',
end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17)), }))
opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
@pytest.mark.parametrize('case', opening_time_cases)
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
apply_cases = []
apply_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((BusinessHour(4), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
apply_cases.append((BusinessHour(-1), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
apply_cases.append((BusinessHour(-4), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
apply_large_n_cases = []
# A week later
apply_large_n_cases.append((BusinessHour(40), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
# 3 days and 1 hour before
apply_large_n_cases.append((BusinessHour(-25), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
# 5 days and 3 hours later
apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp(
'2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp(
'2014-07-04 17:00') - Nano(5)}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp(
'2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp(
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = self._offset()
offset2 = self._offset()
assert not offset1 != offset2
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
self.offset2 = CustomBusinessHour(holidays=self.holidays)
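# Note (an addition): per the calendar sketched above, the holidays used by
# offset2 fall on Fri 6/27, Mon 6/30 and Wed 7/2, so rolling back from
# 2014-07-01 00:00 skips to Thursday 6/26 17:00 (see testRollback1 below).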
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# equivalent in this special case
offset = self._offset()
offset2 = self._offset()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def testEQ(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d - (2 * off - off)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
# 2014/07/01 is a Tuesday; 06/30 (Monday) is excluded by offset1's weekmask
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
def test_normalize(self):
tests = []
tests.append((CustomBusinessHour(normalize=True,
holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
tests.append((CustomBusinessHour(-1, normalize=True,
holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
tests.append((CustomBusinessHour(1, normalize=True, start='17:00',
end='04:00', holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
def test_apply(self):
tests = []
tests.append((
CustomBusinessHour(holidays=self.holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30,
30)}))
tests.append((
CustomBusinessHour(4, holidays=self.holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30,
30)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
tests.append((CustomBusinessHour(holidays=self.holidays),
{Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
'2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5): Timestamp(
'2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5): Timestamp(
'2014-07-01 17:00') - Nano(5)}))
tests.append((CustomBusinessHour(-1, holidays=self.holidays),
{Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
'2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5): Timestamp(
'2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5): Timestamp(
'2014-06-26 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CDay()
offset2 = CDay()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + CDay(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + CDay(10)
def testMult2(self):
assert self.d + (-5 * CDay(-10)) == self.d + CDay(50)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((CDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
pytest.raises(Exception, CDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = CDay()
offset2 = CDay()
assert not offset1 != offset2
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self):
hdays = [datetime(2013, 1, 1) for ele in range(4)]
pth = tm.get_data_path()
cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle'))
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._object()
self.offset2 = self._object(2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._object(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + self._object(10)
def testMult2(self):
assert self.d + (-5 * self._object(-10)) == self.d + self._object(50)
def test_offsets_compare_equal(self):
offset1 = self._object()
offset2 = self._object()
assert not offset1 != offset2
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._object())
_check_roundtrip(self._object(2))
_check_roundtrip(self._object() * 2)
def test_copy(self):
# GH 17452
off = self._object(weekmask='Mon Wed Fri')
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthEnd()
offset2 = CBMonthEnd()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
apply_cases.append((2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
apply_cases.append((-CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
apply_cases.append((-2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
apply_cases.append((CBMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=freq).tolist()[0] == datetime(2012, 1, 31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_object = CBMonthBegin
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthBegin()
offset2 = CBMonthBegin()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
apply_cases = []
apply_cases.append((CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
apply_cases.append((2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
apply_cases.append((-CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
apply_cases.append((-2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
apply_cases.append((CBMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
class TestWeek(Base):
_offset = Week
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
pytest.raises(ValueError, Week, weekday=7)
tm.assert_raises_regex(
ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
offset_cases = []
# not business week
offset_cases.append((Week(), {
datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# Mon
offset_cases.append((Week(weekday=0), {
datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# n=0 -> roll forward. Mon
offset_cases.append((Week(0, weekday=0), {
datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
# n=-2 -> roll back two weeks. Tue
offset_cases.append((Week(-2, weekday=1), {
datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_onOffset(self):
for weekday in range(7):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assert_onOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = Week()
offset2 = Week()
assert not offset1 != offset2
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
def test_constructor(self):
tm.assert_raises_regex(ValueError, "^N cannot be 0",
WeekOfMonth, n=0, week=1, weekday=1)
tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
n=1, week=4, weekday=0)
tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
n=1, week=-1, weekday=0)
tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
n=1, week=0, weekday=-1)
tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
n=1, week=0, weekday=7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
"<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
# see for loop for structure
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15))]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assert_offset_equal(offset, dt, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
assert result == datetime(2011, 1, 12)
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
week, weekday, dt, expected = case
offset = WeekOfMonth(week=week, weekday=weekday)
assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
def test_constructor(self):
tm.assert_raises_regex(ValueError, "^N cannot be 0",
LastWeekOfMonth, n=0, weekday=1)
tm.assert_raises_regex(ValueError, "^Day", LastWeekOfMonth, n=1,
weekday=-1)
tm.assert_raises_regex(
ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7)
def test_offset(self):
# Saturday
last_sat = datetime(2013, 8, 31)
next_sat = datetime(2013, 9, 28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
assert one_day_before + offset_sat == last_sat
one_day_after = (last_sat + timedelta(days=+1))
assert one_day_after + offset_sat == next_sat
# Test On that day
assert last_sat + offset_sat == next_sat
# Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013, 1, 31)
next_thurs = datetime(2013, 2, 28)
one_day_before = last_thurs + timedelta(days=-1)
assert one_day_before + offset_thur == last_thurs
one_day_after = last_thurs + timedelta(days=+1)
assert one_day_after + offset_thur == next_thurs
# Test on that day
assert last_thurs + offset_thur == next_thurs
three_before = last_thurs + timedelta(days=-3)
assert three_before + offset_thur == last_thurs
two_after = last_thurs + timedelta(days=+2)
assert two_after + offset_thur == next_thurs
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
on_offset_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
weekday, dt, expected = case
offset = LastWeekOfMonth(weekday=weekday)
assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthEnd().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SM')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15)}))
offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16)}))
offset_cases.append((SemiMonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30)}))
offset_cases.append((SemiMonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthBegin().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SMS')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4)}))
offset_cases.append((SemiMonthBegin(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assert_offset_equal(-Easter(2),
datetime(2011, 1, 1),
datetime(2009, 4, 12))
assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assert_offset_equal(-Easter(2),
datetime(2010, 4, 4),
datetime(2008, 3, 23))
class TestOffsetNames(object):
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
assert BDay(2).freqstr == '2B'
assert BMonthEnd().freqstr == 'BM'
assert Week(weekday=0).freqstr == 'W-MON'
assert Week(weekday=1).freqstr == 'W-TUE'
assert Week(weekday=2).freqstr == 'W-WED'
assert Week(weekday=3).freqstr == 'W-THU'
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
def test_get_offset():
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset('gibberish')
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset('QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset(name)
class TestParseTimeString(object):
def test_parse_time_string(self):
(date, parsed, reso) = parse_time_string('4Q1984')
(date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984')
assert date == date_lower
assert parsed == parsed_lower
assert reso == reso_lower
def test_parse_time_quarter_w_dash(self):
# https://github.com/pandas-dev/pandas/issue/9688
pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988'), ]
for dashed, normal in pairs:
(date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
(date, parsed, reso) = parse_time_string(normal)
assert date_dash == date
assert parsed_dash == parsed
assert reso_dash == reso
pytest.raises(DateParseError, parse_time_string, "-2Q1992")
pytest.raises(DateParseError, parse_time_string, "2-Q1992")
pytest.raises(DateParseError, parse_time_string, "4-4Q1992")
def test_get_standard_freq():
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
fstr = get_standard_freq('W')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('w')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('1w')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq(('W', 1))
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
get_standard_freq('WeEk')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
fstr = get_standard_freq('5Q')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('5q')
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
get_standard_freq('5QuarTer')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq(('q', 5))
class TestOffsetAliases(object):
def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
assert k == v.copy()
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
assert k == get_offset(k).rule_code
# should be cached - this is kind of an internals test...
assert k in _offset_map
assert k == (get_offset(k) * 3).rule_code
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
assert isinstance(code, int)
assert stride == 3
assert k == _get_freq_str(code)
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
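"""Recursively collect every (transitive) subclass of cls."""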
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
ret = ret | get_all_subclasses(this_subclass)
return ret
class TestCaching(object):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setup_method(self, method):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
assert not inst1._should_cache(), cls
return
assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
def test_should_cache_bmonth_end(self):
assert not BusinessMonthEnd()._should_cache()
def test_should_cache_week_month(self):
assert not WeekOfMonth(weekday=1, week=2)._should_cache()
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=MonthEnd(), normalize=True)
assert not MonthEnd() in _daterange_cache
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=BusinessMonthEnd(), normalize=True)
assert not BusinessMonthEnd() in _daterange_cache
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
assert inst2 not in _daterange_cache
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
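# For example, a UTC-4 offset is normalized by timedelta to
# days=-1, seconds=72000, so the return below evaluates to
# (-1 * 24 * 3600 + 72000) / 3600.0 == -4.0; summing days and seconds
# is what keeps negative offsets correct.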
return (o.days * 24 * 3600 + o.seconds) / 3600.0
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if dateutil.__version__ < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
elif dateutil.__version__ > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}.items()
@pytest.mark.parametrize('tup', offset_classes)
def test_all_offset_classes(self, tup):
offset, test_values = tup
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
# ---------------------------------------------------------------------
def test_get_offset_day_error():
# subclass of _BaseOffset must override _day_opt attribute, or we should
# get a NotImplementedError
with pytest.raises(NotImplementedError):
DateOffset()._get_offset_day(datetime.now())
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
def test_validate_n_error():
with pytest.raises(TypeError):
DateOffset(n='Doh!')
with pytest.raises(TypeError):
MonthBegin(n=timedelta(1))
with pytest.raises(TypeError):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
with pytest.raises(ValueError):
cls(n=1.5)
| bsd-3-clause |
kedz/cuttsum | trec2015/cuttsum/summarizers/_oracle.py | 1 | 12599 | import os
from cuttsum.resources import MultiProcessWorker
from cuttsum.pipeline import ArticlesResource
from cuttsum.misc import si2df
import cuttsum.judgements
import numpy as np
import pandas as pd
import sumpy
class RetrospectiveMonotoneSubmodularOracle(MultiProcessWorker):
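"""Oracle summarizer that sees the whole event stream at once: every
nugget-bearing sentence is pooled and a single greedy monotone-submodular
pass maximizes nugget coverage under the update budget, writing per-event
stats and update TSVs."""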
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u"TREC_DATA", u"."), "system-results",
"retrospective-monotone-submodular-oracle-summaries")
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def get_path_prefix(self, event, corpus, extractor,
budget, soft_matching):
return os.path.join(self.dir_, extractor, str(budget),
"soft_match" if soft_matching is True else "no_soft_match",
corpus.fs_name(), event.fs_name())
def get_job_units(self, event, corpus, **kwargs):
extractor = kwargs.get("extractor", "gold")
if extractor == "gold" or extractor == "goose":
return [0]
else:
raise Exception(
"extractor: {} not implemented!".format(extractor))
def do_job_unit(self, event, corpus, unit, **kwargs):
if unit != 0:
raise Exception("unit of work out of bounds!")
extractor = kwargs.get("extractor", "gold")
soft_match = kwargs.get("soft_match", False)
budget = kwargs.get("budget", 25)
output_path_prefix = self.get_path_prefix(
event, corpus, extractor, budget, soft_match)
## Set up summarizer ###
# This is the monotone submodular objective function (basically
# nugget coverage).
def f_of_A(system, A, V_min_A, e, input_df, ndarray_data):
return len(
set([nugget for nuggets in input_df.ix[A, "nuggets"].tolist()
for nugget in nuggets]))
system = sumpy.system.MonotoneSubmodularBasic(f_of_A=f_of_A, k=budget)
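# For illustration (hypothetical values, not taken from the data): if rows
# 0 and 2 of input_df carry nuggets {"N1", "N2"} and {"N2", "N3"}, then
# f_of_A(..., A=[0, 2], ...) returns len({"N1", "N2", "N3"}) == 3, while a
# further sentence whose nuggets are already covered adds nothing -- the
# diminishing-returns property the greedy selection exploits.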
# Get gold matchings for oracle.
articles = ArticlesResource()
all_matches = cuttsum.judgements.get_merged_dataframe()
matches = all_matches[all_matches["query id"] == event.query_id]
# Set up soft matching if we are using it.
if soft_match is True:
from cuttsum.classifiers import NuggetClassifier
classify_nuggets = NuggetClassifier().get_classifier(event)
if event.query_id.startswith("TS13"):
judged = cuttsum.judgements.get_2013_updates()
judged = judged[judged["query id"] == event.query_id]
judged_uids = set(judged["update id"].tolist())
else:
raise Exception("Bad corpus!")
# All sentences containing nuggets will go in all_df.
all_df = []
# Pull out articles with nuggets.
for hour, path, si in articles.streamitem_iter(
event, corpus, extractor):
# Convert stream item to dataframe and add gold label nuggets.
df = si2df(si, extractor=extractor)
df["nuggets"] = df["update id"].apply(
lambda x: set(
matches[matches["update id"] == x]["nugget id"].tolist()))
# Perform soft nugget matching on unjudged sentences.
if soft_match is True:
### NOTA BENE: getting an array of indices to index unjudged
# sentences so I can force pandas to return a view and not a
# copy.
I = np.where(
df["update id"].apply(lambda x: x not in judged_uids))[0]
unjudged = df[
df["update id"].apply(lambda x: x not in judged_uids)]
unjudged_sents = unjudged["sent text"].tolist()
assert len(unjudged_sents) == I.shape[0]
df.loc[I, "nuggets"] = classify_nuggets(unjudged_sents)
# Add sentences with nuggets to final set for summarizing
df = df[df["nuggets"].apply(len) > 0]
all_df.append(df)
# Collect all dataframes into one and reset index (ALWAYS RESET
# THE INDEX because pandas hates me.)
all_df = pd.concat(all_df)
all_df.reset_index(inplace=True)
summary = system.summarize(all_df)
F_of_S = len(
set(n for ns in summary._df["nuggets"].tolist() for n in ns))
#print "F(S)", F_of_S
#print "summary nuggets"
sum_nuggets = list(set(
n for ns in summary._df["nuggets"].tolist() for n in ns))
sum_nuggets.sort()
print sum_nuggets
possible_nuggets = list(set(
n for ns in all_df["nuggets"].tolist() for n in ns))
possible_nuggets.sort()
print possible_nuggets
print len(possible_nuggets)
event_nuggets = set(matches["nugget id"].tolist())
total_nuggets = len(event_nuggets)
timestamp = int(si.stream_id.split("-")[0])
output_df = pd.DataFrame(
[{"Cum. F(S)": F_of_S,
"F(S)": F_of_S,
"UB no const.": len(possible_nuggets), # total_nuggets,
"budget": budget,
"Tot. Updates": len(summary._df),
"event title": event.fs_name(),
"timestamp": timestamp,
"query id": event.query_id},],
columns=["timestamp", "query id", "event title", "Cum. F(S)",
"F(S)", "UB no const.",
"Tot. Updates", "budget",])
parent = os.path.dirname(output_path_prefix)
if not os.path.exists(parent):
os.makedirs(parent)
stats_path = output_path_prefix + ".stats.tsv"
updates_path = output_path_prefix + ".updates.tsv"
with open(stats_path, "w") as f:
output_df.to_csv(f, sep="\t", index=False)
summary._df["sent text"] = summary._df["sent text"].apply(
lambda x: x.encode("utf-8"))
with open(updates_path, "w") as f:
summary._df[["timestamp", "update id", "sent text"]].sort(
["update id"]).to_csv(f, sep="\t", index=False)
class MonotoneSubmodularOracle(MultiProcessWorker):
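"""Streaming counterpart of the retrospective oracle: each article is
summarized as it arrives, nuggets already covered by earlier selections
are discounted via a cache, and the shared update budget is spent down
until it is exhausted."""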
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u"TREC_DATA", u"."), "system-results",
"monotone-submodular-oracle-summaries")
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def get_path_prefix(self, event, corpus, extractor, budget, soft_matching):
return os.path.join(self.dir_, extractor, str(budget),
"soft_match" if soft_matching is True else "no_soft_match",
corpus.fs_name(), event.fs_name())
def get_job_units(self, event, corpus, **kwargs):
extractor = kwargs.get("extractor", "gold")
if extractor == "gold" or extractor == "goose":
return [0]
else:
raise Exception("extractor: {} not implemented!".format(extractor))
def do_job_unit(self, event, corpus, unit, **kwargs):
if unit != 0:
raise Exception("unit of work out of bounds!")
extractor = kwargs.get("extractor", "gold")
soft_match = kwargs.get("soft_match", False)
budget = kwargs.get("budget", 25)
output_path_prefix = self.get_path_prefix(
event, corpus, extractor, budget, soft_match)
## Set up summarizer ###
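# Same monotone submodular objective as the retrospective oracle: the
# number of distinct nuggets covered by the selected rows A.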
def f_of_A(system, A, V_min_A, e, input_df, ndarray_data):
return len(
set([nugget for nuggets in input_df.ix[A, "nuggets"].tolist()
for nugget in nuggets]))
system = sumpy.system.MonotoneSubmodularBasic(f_of_A=f_of_A, k=budget)
# Collect all previously collected nuggets here.
nugget_cache = set()
# Get gold matchings for oracle.
articles = ArticlesResource()
all_matches = cuttsum.judgements.get_merged_dataframe()
matches = all_matches[all_matches["query id"] == event.query_id]
# Set up soft matching if we are using it.
if soft_match is True:
from cuttsum.classifiers import NuggetClassifier
classify_nuggets = NuggetClassifier().get_classifier(event)
if event.query_id.startswith("TS13"):
judged = cuttsum.judgements.get_2013_updates()
judged = judged[judged["query id"] == event.query_id]
judged_uids = set(judged["update id"].tolist())
else:
raise Exception("Bad corpus!")
# Collect stats for each document here.
stats = []
# Aggregate summaries in summary_df.
summary_df = []
cum_F_of_S = 0
all_seen_nuggets = set()
# event_nuggets = set(matches["nugget id"].tolist())
# total_nuggets = len(event_nuggets)
total_updates = 0
# Pull out articles with nuggets.
for hour, path, si in articles.streamitem_iter(
event, corpus, extractor):
print hour, si.stream_id
# Convert stream item to dataframe and add gold label nuggets.
df = si2df(si, extractor=extractor)
df["nuggets"] = df["update id"].apply(
lambda x: set(
matches[matches["update id"] == x]["nugget id"].tolist()))
# Perform soft nugget matching on unjudged sentences.
if soft_match is True:
### NOTA BENE: getting an array of indices to index unjudged
# sentences so I can force pandas to return a view and not a
# copy.
I = np.where(
df["update id"].apply(lambda x: x not in judged_uids))[0]
unjudged = df[
df["update id"].apply(lambda x: x not in judged_uids)]
unjudged_sents = unjudged["sent text"].tolist()
assert len(unjudged_sents) == I.shape[0]
df.loc[I, "nuggets"] = classify_nuggets(unjudged_sents)
# Remove nuggets from dataframe if we have already collected them in
# the cache. The scoring function should ignore these.
df = df[df["nuggets"].apply(len) > 0]
all_seen_nuggets.update(
set(n for ns in df["nuggets"].tolist() for n in ns))
df["nuggets"] = df["nuggets"].apply(
lambda x: x.difference(nugget_cache))
if len(df) == 0:
continue
# Run summarizer on current document and update stats about it.
summary = system.summarize(df)
summary_nuggets = set(n for ns in summary._df["nuggets"].tolist()
for n in ns)
nugget_cache.update(summary_nuggets)
system.k -= len(summary._df)
F_of_S = len(summary_nuggets)
cum_F_of_S += F_of_S
total_updates += len(summary._df)
timestamp = int(si.stream_id.split("-")[0])
stats.append({
"Cum. F(S)": cum_F_of_S,
"F(S)": F_of_S,
"UB no const.": len(all_seen_nuggets),
"budget": budget,
"Tot. Updates": total_updates,
"event title": event.fs_name(),
"timestamp": timestamp,
"query id": event.query_id,
})
summary_df.append(summary._df)
if system.k <= 0:
print "Budget exceeded!"
break
output_df = pd.DataFrame(stats,
columns=["timestamp", "query id", "event title",
"Cum. F(S)", "F(S)", "UB no const.",
"Tot. Updates", "budget",])
# Write stats and updates to file.
parent = os.path.dirname(output_path_prefix)
if not os.path.exists(parent):
os.makedirs(parent)
stats_path = output_path_prefix + ".stats.tsv"
updates_path = output_path_prefix + ".updates.tsv"
with open(stats_path, "w") as f:
output_df.to_csv(f, sep="\t", index=False)
summary_df = pd.concat(summary_df)
summary_df["sent text"] = summary_df["sent text"].apply(
lambda x: x.encode("utf-8"))
with open(updates_path, "w") as f:
summary_df[["timestamp", "update id", "sent text"]].sort(
["update id"]).to_csv(f, sep="\t", index=False)
| apache-2.0 |
mixturemodel-flow/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
hrjn/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
xapharius/HadoopML | Engine/src/examples/ensemble_simulation.py | 2 | 2325 | '''
Created on Mar 23, 2015
@author: xapharius
'''
import numpy as np
from simulation.mr_simulator.ensemble_simulator import EnsembleSimulator
from simulation.sampler.bootstrap_sampler import BootstrapSampler
from sklearn.linear_model import LinearRegression
from datahandler.numerical2.numerical_data_handler import NumericalDataHandler
from factory.algorithm_factory import AlgorithmFactory
from factory.homogenous_factory import HomogenousFactory
from ensemble.regression.bag import Bag
from validator.regression_validator import RegressionValidator
from validator.classification_validator import ClassificationValidator
if __name__ == '__main__':
print("=== Ensemble Simulation Example ===")
nr_params = 11
nr_label_dim = 1
data_file = '../../../data/wine-quality/winequality-red.csv'
print( "\n data: " + data_file
+ "\n params: " + str(nr_params)
+ "\n target dim: " + str(nr_label_dim)
+ "\n"
)
# 0. Prepare Data Scource
data = np.loadtxt(open(data_file, "rb"), delimiter = ";")
training_data = data[:1000]
validation_data = data[1000:]
bsampler = BootstrapSampler(sample_size_ratio = 0.1)
bsampler.bind_data(training_data)
# 1. set data handler
datahandler = NumericalDataHandler(random_subset_of_features = True)
# 2. define algorithm Factory
algf = AlgorithmFactory(LinearRegression)
# 3 Factory
factory = HomogenousFactory(datahandler, algf)
# 4. run
simulator = EnsembleSimulator(data_sampler = bsampler, factory = factory, ensemble_cls = Bag)
ensemble = simulator.simulate(nr_mappers = 10)
print "Ensemble's number of features per model:", [manager.feature_engineer.number_of_features for manager in ensemble.managers]
# 5. validate result
validator = RegressionValidator()
model_results = validator.validate(ensemble, validation_data)
print "Bagged Model:"
print model_results
#Benchmark
benchmark_model = factory.get_instance()
benchmark_model.feature_engineer.random_subset_of_features_ratio = 1
benchmark_model.train(training_data)
benchmark_results = validator.validate(benchmark_model, validation_data)
print "Benchmark Model:"
print benchmark_results | mit |
surhudm/scipy | scipy/integrate/_bvp.py | 61 | 39966 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
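# Illustrative sketch (not used by the solver): what `estimate_fun_jac`
# returns for a simple harmonic oscillator whose Jacobian is known in closed
# form.  The test function and the tolerance are assumptions chosen for the
# example; the tolerance follows from the sqrt(EPS) step used above.
def _demo_estimate_fun_jac():
    def fun(x, y, p):
        # y1' = y2, y2' = -y1, so d f / d y = [[0, 1], [-1, 0]] everywhere.
        return np.vstack((y[1], -y[0]))
    x = np.linspace(0, 1, 4)
    y = np.vstack((np.sin(x), np.cos(x)))
    df_dy, df_dp = estimate_fun_jac(fun, x, y, np.array([]))
    exact = np.array([[0.0, 1.0], [-1.0, 0.0]])[:, :, None]
    assert df_dp is None  # no unknown parameters were passed
    assert np.allclose(df_dy, exact, atol=1e-7)
    return df_dy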
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
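# Illustrative sketch (not used by the solver): for the example drawn in the
# `construct_global_jac` docstring below (n=2, m=4, k=1) the sparsity pattern
# has 45 structurally nonzero entries, which is exactly what
# `compute_jac_indices` enumerates.
def _demo_compute_jac_indices():
    i, j = compute_jac_indices(n=2, m=4, k=1)
    # 2*(m-1)*n**2 collocation entries + 2*(n+k)*n boundary-condition entries
    # + (m-1)*n*k + (n+k)*k parameter entries = 24 + 12 + 6 + 3 = 45.
    assert i.shape == j.shape == (45,)
    return i, j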
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0  5
1 1 2 2 0 0 0 0  5
0 0 1 1 2 2 0 0  5
0 0 1 1 2 2 0 0  5
0 0 0 0 1 1 2 2  5
0 0 0 0 1 1 2 2  5

3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
This method belongs to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations, based on whether the previous full step was accepted: if the full
step was taken, the same Jacobian is reused on the next iteration.
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for problems with the
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# We also need to control residuals of the boundary conditions. But it
# seems that they become very small eventually as the solver progresses,
# i.e. the tolerances for BC are not very important. We set them 1.5 orders
# lower than the BVP tolerance as well.
tol_bc = 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluation and factorization, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(bc_res < tol_bc)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Total nodes", "Nodes added"))
def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
print("{:^15}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as the square root of
the normalized integrals of the squared relative residuals over each interval.
Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the
fact that residuals at the mesh nodes are identically zero.
In [2]_ they don't normalize integrals by interval lengths, which gives
the residuals a higher rate of convergence by a factor of h**0.5.
I chose to do such normalization for ease of interpretation of the return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
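# Illustrative sketch (not used by the solver): the spline returned by
# `create_spline` reproduces the supplied values and first derivatives at the
# mesh nodes.  The quadratic test function is an arbitrary assumption for the
# example.
def _demo_create_spline():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([x**2])       # shape (n, m) with n = 1
    yp = np.array([2 * x])     # exact derivatives at the nodes
    sol = create_spline(y, yp, x, np.diff(x))
    assert np.allclose(sol(x), y)
    assert np.allclose(sol(x, 1), yp)
    return sol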
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Node removal logic is not implemented, as its impact on the solver is
presumably negligible. So only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Indices of intervals in each of which to insert 1 new node at the middle.
insert_2 : ndarray
Indices of intervals in each of which to insert 2 new nodes, dividing
the interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because np.insert implementation apparently varies with a version of
# numpy, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
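# A small worked example (not used by the solver): refine the mesh
# [0, 1, 2, 3] by inserting one midpoint into interval 0 and two points into
# interval 2, which splits [2, 3] into three equal parts.
def _demo_modify_mesh():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    x_new = modify_mesh(x, np.array([0]), np.array([2]))
    assert np.allclose(x_new, [0.0, 0.5, 1.0, 2.0, 7.0 / 3.0, 8.0 / 3.0, 3.0])
    return x_new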
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(x, y, p):
return np.asarray(bc(x, y, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is a n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined there must be n + k boundary conditions, i.e. bc must be
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict boundary conditions. See [2]_ for the explanation how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4-th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition is different by a multiplier of
h**0.5 (h is an interval length) from the definition used here.
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take advantage of having the
solution in a spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and guess for y. We aim to find the solution for
k = 2 * pi; to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, m,
nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, m, nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
else:
status = 0
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}, "
"maximum relative residual {:.2e}."
.format(iteration, x.shape[0], max_rms_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}, "
"maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}, maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
joshloyal/pydata-amazon-products | amazon_products/text_plots.py | 1 | 5332 | import numpy as np
import pandas as pd
import seaborn as sns
import wordcloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline
def sample_array(seq, n_samples, seed=123):
random_state = np.random.RandomState(seed)
return random_state.choice(seq, size=n_samples, replace=False)
def frequency_plot(text_list,
ngram_range=(1,2),
max_words=500,
plot_n_words=10,
yaxis_label='word',
**kwargs):
"""Generate a horizontal bar chart of words ranked by their global
tf-idf weights in the corpus.
Parameters
----------
text_list : array-like of shape [n_samples,]
The list of documents to generate the word cloud.
ngram_range : tuple (default=(1,2))
The ngrams to use. Defaults to uni-gram and bi-grams.
max_words : int (default=500)
The maximum vocabulary of the word cloud.
plot_n_words : int (default=10)
The number of words to display in the bar plot.
**kwargs
Any remaining keyword arguments to pass to `sns.barplot`.
Returns
-------
A seaborn barplot.
"""
# fit text features
vectorizer = TfidfVectorizer(ngram_range=ngram_range,
stop_words='english',
sublinear_tf=False,
use_idf=True,
max_df=0.95,
min_df=5,
max_features=max_words)
tfidf = vectorizer.fit_transform(text_list)
global_tfidf = np.asarray(tfidf.sum(axis=0)).flatten()
global_tfidf /= np.abs(global_tfidf).max()
# rank words by their global tf-idf weights
vocab = vectorizer.vocabulary_
freq = {word: global_tfidf[word_index] for
word, word_index in vocab.items()}
freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
word, weight = zip(*freq)
data = pd.DataFrame({yaxis_label: word, 'tf-idf': weight})
return sns.barplot(
'tf-idf', yaxis_label, data=data[:plot_n_words], **kwargs)
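# Minimal usage sketch for `frequency_plot` (illustrative only).  The tiny
# synthetic corpus below is an assumption chosen so that every term survives
# the min_df=5 / max_df=0.95 thresholds used above; real usage would pass a
# list of product descriptions or reviews.
def _demo_frequency_plot():
    corpus = (["great camera excellent zoom"] * 6 +
              ["poor battery short life"] * 6 +
              ["great battery excellent value"] * 6)
    return frequency_plot(corpus, plot_n_words=5, yaxis_label='term')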
def word_cloud(text_list,
ngram_range=(1,2),
max_words=500,
fig_size=(500,500)):
"""Generate a word-cloud weighted by idf weights to a list of
documents.
Parameters
----------
text_list : array-like of shape [n_samples,]
The list of documents to generate the word cloud.
ngram_range : tuple (default=(1,2))
The ngrams to use. Defaults to uni-gram and bi-grams.
max_words : int (default=500)
The maximum vocabulary of the word cloud.
fig_size : tuple (default=(500,500))
The size of the word cloud image.
Returns
-------
The word cloud as a PIL Image.
"""
# fit text features
vectorizer = TfidfVectorizer(ngram_range=ngram_range,
stop_words='english',
sublinear_tf=False,
use_idf=True,
max_df=0.95,
min_df=5,
max_features=max_words)
tfidf = vectorizer.fit_transform(text_list)
global_tfidf = np.asarray(tfidf.sum(axis=0)).flatten()
global_tfidf /= np.abs(global_tfidf).max()
width, height = fig_size
word_cloud = wordcloud.WordCloud(width=width, height=height)
# weight word cloud by global idf weights
vocab = vectorizer.vocabulary_
freq = {word: global_tfidf[word_index] for
word, word_index in vocab.items()}
word_cloud.fit_words(freq)
return word_cloud.to_image()
def text_embedding(text_list,
n_samples=None,
labels=None,
n_components=50,
perplexity=30,
n_iter=1000,
random_state=123):
"""Create an embedding that reflects the semantics of a collection of
documents using LSA and t-SNE.
"""
# downsample the data if necessary
if n_samples:
text_list = sample_array(text_list, n_samples, seed=random_state)
if labels is not None:
labels = sample_array(labels, n_samples, seed=random_state)
# make a simple LSA pipeline
vectorizer = TfidfVectorizer(ngram_range=(1,2),
stop_words='english',
sublinear_tf=True,
use_idf=True,
norm='l2',
max_features=10000)
svd = TruncatedSVD(n_components=n_components, random_state=123)
lsa = make_pipeline(vectorizer, svd)
# fit lsa
X = lsa.fit_transform(text_list)
# project down to two dimensions with t-SNE
X = TSNE(n_components=2,
init='pca',
perplexity=perplexity,
n_iter=n_iter,
random_state=123).fit_transform(X)
proj = pd.DataFrame({'component_1': X[:, 0], 'component_2': X[:, 1]})
proj['text'] = text_list
if labels is not None:
proj['labels'] = labels
return proj
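# Minimal usage sketch for `text_embedding` (illustrative only).  The topic
# word pools, the document count, and the reduced n_components/perplexity are
# assumptions sized so that the LSA + t-SNE pipeline above runs on a toy
# corpus; real usage would pass product texts and, for example, categories.
def _demo_text_embedding():
    rng = np.random.RandomState(0)
    pools = {
        "camera": "camera lens zoom photo sharp focus battery tripod".split(),
        "kitchen": "blender knife pan oven recipe dough spice grill".split(),
        "books": "novel author chapter plot character ending prose sequel".split(),
    }
    texts, labels = [], []
    for label, pool in pools.items():
        for _ in range(40):
            texts.append(" ".join(rng.choice(pool, size=6, replace=False)))
            labels.append(label)
    # Returns a DataFrame with component_1, component_2, text and labels.
    return text_embedding(texts, labels=labels, n_components=10,
                          perplexity=10, n_iter=500)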
| mit |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Applications/ParaView/Testing/Python/TestPythonViewMatplotlibScript.py | 1 | 1798 | # Set up a basic scene for rendering.
from paraview.simple import *
import os
import sys
script = """
import paraview.numpy_support
# Utility to get next color
def getNextColor():
colors = 'bgrcmykw'
for c in colors:
yield c
# This function must be defined. It is where specific data arrays are requested.
def setup_data(view):
print "Setting up data"
# This function must be defined. It is where the actual rendering commands for matplotlib go.
def render(view,width,height):
from paraview import python_view
figure = python_view.matplotlib_figure(width,height)
ax = figure.add_subplot(111)
ax.hold = True
numObjects = view.GetNumberOfVisibleDataObjects()
print "num visible objects: ", numObjects
for i, color in zip(xrange(0,numObjects), getNextColor()):
dataObject = view.GetVisibleDataObjectForRendering(i)
if dataObject:
vtk_points = dataObject.GetPoints()
if vtk_points:
vtk_points_data = vtk_points.GetData()
pts = paraview.numpy_support.vtk_to_numpy(vtk_points_data)
x, y = pts[:,0], pts[:,1]
ax.scatter(x, y, color=color)
ax.hold = False
return python_view.figure_to_image(figure)
"""
view = CreateView("PythonView")
view.Script = script
cone = Cone()
Show(cone, view)
sphere = Sphere()
Show(sphere, view)
Render()
try:
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
print "Could not get baseline directory. Test failed."
baseline_file = os.path.join(baselinePath, "TestPythonViewMatplotlibScript.png")
import vtk.test.Testing
vtk.test.Testing.VTK_TEMP_DIR = vtk.util.misc.vtkGetTempDir()
vtk.test.Testing.compareImage(view.GetRenderWindow(), baseline_file, threshold=25)
vtk.test.Testing.interact()
Delete(cone)
del cone
Delete(sphere)
del sphere
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.1/examples/legacy.py | 1 | 5270 | #!/usr/bin/env python
# coding: utf-8
# Comparing PHOEBE 2 vs PHOEBE Legacy
# ============================
#
# **NOTE**: PHOEBE 1.0 legacy is an alternate backend and is not installed with PHOEBE 2.0. In order to run this backend, you'll need to have [PHOEBE 1.0](https://phoebe-project.org/1.0) installed and manually build the python bindings in the `phoebe-py` directory.
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u
import numpy as np
import matplotlib.pyplot as plt
phoebe.devel_on() # needed to use WD-style meshing, which isn't fully supported yet
logger = phoebe.logger()
b = phoebe.default_binary()
b['q'] = 0.7
b['requiv@secondary'] = 0.7
# Adding Datasets and Compute Options
# --------------------
# In[3]:
b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')
b.add_dataset('rv', times=np.linspace(0,1,101), dataset='rvdyn')
b.add_dataset('rv', times=np.linspace(0,1,101), dataset='rvnum')
# Let's add compute options for phoebe using both the new (marching) method for creating meshes as well as the WD method which imitates the format of the mesh used within legacy.
# In[4]:
b.add_compute(compute='phoebe2marching', irrad_method='none', mesh_method='marching')
# In[5]:
b.add_compute(compute='phoebe2wd', irrad_method='none', mesh_method='wd', eclipse_method='graham')
# Now we add compute options for the 'legacy' backend.
# In[6]:
b.add_compute('legacy', compute='phoebe1', irrad_method='none')
# And set the two RV datasets to use the correct methods (for both compute options)
# In[7]:
b.set_value_all('rv_method', dataset='rvdyn', value='dynamical')
# In[8]:
b.set_value_all('rv_method', dataset='rvnum', value='flux-weighted')
# Let's use the external atmospheres available for both phoebe1 and phoebe2
# In[9]:
b.set_value_all('atm', 'extern_planckint')
# Let's make sure both 'phoebe1' and 'phoebe2wd' use the same value for gridsize
# In[10]:
b.set_value_all('gridsize', 30)
# Let's also disable other special effects such as heating, gravity, and light-time effects.
# In[11]:
b.set_value_all('ld_func', 'logarithmic')
b.set_value_all('ld_coeffs', [0.,0.])
# In[12]:
b.set_value_all('rv_grav', False)
# In[13]:
b.set_value_all('ltte', False)
# Finally, let's compute all of our models
# In[14]:
b.run_compute(compute='phoebe2marching', model='phoebe2marchingmodel')
# In[15]:
b.run_compute(compute='phoebe2wd', model='phoebe2wdmodel')
# In[16]:
b.run_compute(compute='phoebe1', model='phoebe1model')
# Plotting
# -------------------------
# ### Light Curve
# In[17]:
colors = {'phoebe2marchingmodel': 'g', 'phoebe2wdmodel': 'b', 'phoebe1model': 'r'}
afig, mplfig = b['lc01'].plot(c=colors, legend=True, show=True)
# Now let's plot the residuals between these two models
# In[18]:
artist, = plt.plot(b.get_value('fluxes@lc01@phoebe2marchingmodel') - b.get_value('fluxes@lc01@phoebe1model'), 'g-')
artist, = plt.plot(b.get_value('fluxes@lc01@phoebe2wdmodel') - b.get_value('fluxes@lc01@phoebe1model'), 'b-')
artist = plt.axhline(0.0, linestyle='dashed', color='k')
ylim = plt.ylim(-0.003, 0.003)
# ### Dynamical RVs
# Since the dynamical RVs don't depend on the mesh, there should be no difference between the 'phoebe2marching' and 'phoebe2wd' synthetic models. Here we'll just choose one to plot.
# In[19]:
afig, mplfig = b.filter(dataset='rvdyn', model=['phoebe2wdmodel', 'phoebe1model']).plot(c=colors, legend=True, show=True)
# And also plot the residuals of both the primary and secondary RVs (notice the scale on the y-axis)
# In[20]:
artist, = plt.plot(b.get_value('rvs@rvdyn@primary@phoebe2wdmodel') - b.get_value('rvs@rvdyn@primary@phoebe1model'), color='b', ls=':')
artist, = plt.plot(b.get_value('rvs@rvdyn@secondary@phoebe2wdmodel') - b.get_value('rvs@rvdyn@secondary@phoebe1model'), color='b', ls='-.')
artist = plt.axhline(0.0, linestyle='dashed', color='k')
ylim = plt.ylim(-1.5e-12, 1.5e-12)
# ### Numerical (flux-weighted) RVs
# In[21]:
afig, mplfig = b.filter(dataset='rvnum').plot(c=colors, show=True)
# In[22]:
artist, = plt.plot(b.get_value('rvs@rvnum@primary@phoebe2marchingmodel', ) - b.get_value('rvs@rvnum@primary@phoebe1model'), color='g', ls=':')
artist, = plt.plot(b.get_value('rvs@rvnum@secondary@phoebe2marchingmodel') - b.get_value('rvs@rvnum@secondary@phoebe1model'), color='g', ls='-.')
artist, = plt.plot(b.get_value('rvs@rvnum@primary@phoebe2wdmodel', ) - b.get_value('rvs@rvnum@primary@phoebe1model'), color='b', ls=':')
artist, = plt.plot(b.get_value('rvs@rvnum@secondary@phoebe2wdmodel') - b.get_value('rvs@rvnum@secondary@phoebe1model'), color='b', ls='-.')
artist = plt.axhline(0.0, linestyle='dashed', color='k')
ylim = plt.ylim(-1e-2, 1e-2)
# In[ ]:
| gpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | 6 | 7005 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
ny, nx = r.shape
if not ((ny, nx) == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {0}'
'\ng.shape = {1}'
'\nb.shape = {2}'
''.format(r.shape, g.shape, b.shape))
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
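# Minimal usage sketch for `RGBAxes` (illustrative only): the figure geometry,
# the padding, and the random channel images are assumptions made for the
# example; the helper is not executed on import.
def _demo_rgb_axes():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(5, 5))
    ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.05)
    rng = np.random.RandomState(0)
    r, g, b = rng.random_sample((3, 64, 64))
    ax.imshow_rgb(r, g, b, origin="lower", interpolation="nearest")
    return fig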
| apache-2.0 |
DonBeo/statsmodels | statsmodels/duration/tests/test_phreg.py | 6 | 11981 | import os
import numpy as np
from statsmodels.duration.hazard_regression import PHReg
from numpy.testing import (assert_allclose,
assert_equal)
import pandas as pd
# TODO: Include some corner cases: data sets with empty strata, strata
# with no events, entry times after censoring times, etc.
# All the R results
from . import survival_r_results
from . import survival_enet_r_results
"""
Tests of PHReg against R coxph.
Tests include entry times and stratification.
phreg_gentests.py generates the test data sets and puts them into the
results folder.
survival.R runs R on all the test data sets and constructs the
survival_r_results module.
"""
# Arguments passed to the PHReg fit method.
args = {"method": "bfgs", "disp": 0}
def get_results(n, p, ext, ties):
if ext is None:
coef_name = "coef_%d_%d_%s" % (n, p, ties)
se_name = "se_%d_%d_%s" % (n, p, ties)
time_name = "time_%d_%d_%s" % (n, p, ties)
hazard_name = "hazard_%d_%d_%s" % (n, p, ties)
else:
coef_name = "coef_%d_%d_%s_%s" % (n, p, ext, ties)
se_name = "se_%d_%d_%s_%s" % (n, p, ext, ties)
time_name = "time_%d_%d_%s_%s" % (n, p, ext, ties)
hazard_name = "hazard_%d_%d_%s_%s" % (n, p, ext, ties)
coef = getattr(survival_r_results, coef_name)
se = getattr(survival_r_results, se_name)
time = getattr(survival_r_results, time_name)
hazard = getattr(survival_r_results, hazard_name)
return coef, se, time, hazard
class TestPHReg(object):
# Load a data file from the results directory
def load_file(self, fname):
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=" ")
time = data[:,0]
status = data[:,1]
entry = data[:,2]
exog = data[:,3:]
return time, status, entry, exog
# Run a single test against R output
def do1(self, fname, ties, entry_f, strata_f):
# Read the test data.
time, status, entry, exog = self.load_file(fname)
n = len(time)
vs = fname.split("_")
n = int(vs[2])
p = int(vs[3].split(".")[0])
ties1 = ties[0:3]
# Needs to match the kronecker statement in survival.R
strata = np.kron(range(5), np.ones(n // 5))
# No stratification or entry times
mod = PHReg(time, exog, status, ties=ties)
phrb = mod.fit(**args)
coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)
assert_allclose(phrb.params, coef_r, rtol=1e-3)
assert_allclose(phrb.bse, se_r, rtol=1e-4)
#time_h, cumhaz, surv = phrb.baseline_hazard[0]
# Entry times but no stratification
phrb = PHReg(time, exog, status, entry=entry,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-3)
# Stratification but no entry times
phrb = PHReg(time, exog, status, strata=strata,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-4)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Stratification and entry times
phrb = PHReg(time, exog, status, entry=entry,
strata=strata, ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et_st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Run all the tests
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("survival")
and x.endswith(".csv")]
for fname in fnames:
for ties in "breslow","efron":
for entry_f in False,True:
for strata_f in False,True:
yield (self.do1, fname, ties, entry_f,
strata_f)
def test_missing(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
time[0:5] = np.nan
status[5:10] = np.nan
exog[10:15,:] = np.nan
md = PHReg(time, exog, status, missing='drop')
assert_allclose(len(md.endog), 185)
assert_allclose(len(md.status), 185)
assert_allclose(md.exog.shape, np.r_[185,4])
def test_formula(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
entry = np.zeros_like(time)
entry[0:10] = time[0:10] / 2
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1],
"exog3": exog[:, 2], "exog4": exog[:, 3],
"entry": entry})
mod1 = PHReg(time, exog, status, entry=entry)
rslt1 = mod1.fit()
fml = "time ~ 0 + exog1 + exog2 + exog3 + exog4"
mod2 = PHReg.from_formula(fml, df, status=status,
entry=entry)
rslt2 = mod2.fit()
mod3 = PHReg.from_formula(fml, df, status="status",
entry="entry")
rslt3 = mod3.fit()
assert_allclose(rslt1.params, rslt2.params)
assert_allclose(rslt1.params, rslt3.params)
assert_allclose(rslt1.bse, rslt2.bse)
assert_allclose(rslt1.bse, rslt3.bse)
def test_predict_formula(self):
n = 100
np.random.seed(34234)
time = 50 * np.random.uniform(size=n)
status = np.random.randint(0, 2, n).astype(np.float64)
exog = np.random.uniform(1, 2, size=(n, 2))
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1]})
fml = "time ~ 0 + exog1 + np.log(exog2) + exog1*exog2"
model1 = PHReg.from_formula(fml, df, status=status)
result1 = model1.fit()
from patsy import dmatrix
dfp = dmatrix(model1.data.design_info.builder, df)
pr1 = result1.predict()
pr2 = result1.predict(exog=df)
pr3 = model1.predict(result1.params, exog=dfp) # No standard errors
pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)
prl = (pr1, pr2, pr3, pr4)
for i in range(4):
for j in range(i):
assert_allclose(prl[i].predicted_values, prl[j].predicted_values)
prl = (pr1, pr2, pr4)
for i in range(3):
for j in range(i):
assert_allclose(prl[i].standard_errors, prl[j].standard_errors)
def test_offset(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod1 = PHReg(time, exog, status)
rslt1 = mod1.fit()
offset = exog[:,0] * rslt1.params[0]
exog = exog[:, 1:]
mod2 = PHReg(time, exog, status, offset=offset)
rslt2 = mod2.fit()
assert_allclose(rslt2.params, rslt1.params[1:])
def test_post_estimation(self):
# All regression tests
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
mart_resid = rslt.martingale_residuals
assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)
w_avg = rslt.weighted_covariate_averages
assert_allclose(np.abs(w_avg[0]).sum(0),
np.r_[7.31008415, 9.77608674,10.89515885, 13.1106801])
bc_haz = rslt.baseline_cumulative_hazard
v = [np.mean(np.abs(x)) for x in bc_haz[0]]
w = np.r_[23.482841556421608, 0.44149255358417017,
0.68660114081275281]
assert_allclose(v, w)
score_resid = rslt.score_residuals
v = np.r_[ 0.50924792, 0.4533952, 0.4876718, 0.5441128]
w = np.abs(score_resid).mean(0)
assert_allclose(v, w)
groups = np.random.randint(0, 3, 200)
mod = PHReg(time, exog, status)
rslt = mod.fit(groups=groups)
robust_cov = rslt.cov_params()
v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]
w = np.abs(robust_cov).mean(0)
assert_allclose(v, w, rtol=1e-6)
s_resid = rslt.schoenfeld_residuals
ii = np.flatnonzero(np.isfinite(s_resid).all(1))
s_resid = s_resid[ii, :]
v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]
assert_allclose(np.abs(s_resid).mean(0), v)
def test_summary(self):
# smoke test
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
rslt.summary()
def test_predict(self):
# All smoke tests. We should be able to convert the lhr and hr
# tests into real tests against R. There are many options to
# this function that may interact in complicated ways. Only a
# few key combinations are tested here.
np.random.seed(34234)
endog = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(endog, exog, status)
rslt = mod.fit()
rslt.predict()
for pred_type in 'lhr', 'hr', 'cumhaz', 'surv':
rslt.predict(pred_type=pred_type)
rslt.predict(endog=endog[0:10], pred_type=pred_type)
rslt.predict(endog=endog[0:10], exog=exog[0:10,:],
pred_type=pred_type)
def test_get_distribution(self):
# Smoke test
np.random.seed(34234)
exog = np.random.normal(size=(200, 2))
lin_pred = exog.sum(1)
elin_pred = np.exp(-lin_pred)
time = -elin_pred * np.log(np.random.uniform(size=200))
mod = PHReg(time, exog)
rslt = mod.fit()
dist = rslt.get_distribution()
fitted_means = dist.mean()
true_means = elin_pred
fitted_var = dist.var()
fitted_sd = dist.std()
sample = dist.rvs()
def test_fit_regularized(self):
# Data set sizes
for n,p in (50,2),(100,5):
# Penalty weights
for js,s in enumerate([0,0.1]):
coef_name = "coef_%d_%d_%d" % (n, p, js)
coef = getattr(survival_enet_r_results, coef_name)
fname = "survival_data_%d_%d.csv" % (n, p)
time, status, entry, exog = self.load_file(fname)
exog -= exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = PHReg(time, exog, status=status, ties='breslow')
rslt = mod.fit_regularized(alpha=s)
# The agreement isn't very high, the issue may be on
# their side. They seem to use some approximations
# that we are not using.
assert_allclose(rslt.params, coef, rtol=0.3)
# Smoke test for summary
smry = rslt.summary()
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
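# A self-contained sketch of the PHReg call pattern exercised above: simulate
# exponential survival times with two covariates, fit the proportional
# hazards model, and return the estimates. The coefficients, sample size and
# seed are arbitrary; only the PHReg(time, exog, status).fit() usage is taken
# from the tests.
def _example_phreg_fit():
    np.random.seed(0)
    exog = np.random.normal(size=(300, 2))
    lin_pred = np.dot(exog, np.r_[0.5, -0.5])
    time = -np.exp(-lin_pred) * np.log(np.random.uniform(size=300))
    status = np.ones(300)  # all events observed (no censoring)
    rslt = PHReg(time, exog, status).fit()
    return rslt.params, rslt.bse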
| bsd-3-clause |
animesh-garg/cgt | examples/broken/mnist_torchstyle.py | 22 | 3157 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original', data_home='~/cgt/data') # XXX
print(mnist.data.shape)
print(mnist.target.shape)
np.unique(mnist.target)
#plt.imshow(mnist.data[1, :].reshape(28, 28))
#plt.show()
# do some preprocessing
X = mnist.data
y = mnist.target
X = X.astype('float64')
X = X / 255
# train-test split (as [Joachims, 2006])
# TODO can define own validation split...
n_train = 60000
X_train = X[:n_train, :]
X_test = X[n_train:, :]
y_train = y[:n_train]
y_test = y[n_train:]
# construct the network
import nn
import cgt
from opt import sgd_update
N_LAYERS = 2
hid_size = X.shape[1] # 28 * 28
out_size = 10
inps = [cgt.matrix(dtype=cgt.floatX)]
param_list = []
for k in xrange(N_LAYERS):
tmp = nn.Affine(hid_size, hid_size)#(inps[k])
param_list.extend([tmp.weight, tmp.bias])
inps.append(cgt.tanh(tmp(inps[k])))
tmp = nn.Affine(hid_size, out_size)
param_list.extend([tmp.weight, tmp.bias])
logprobs = nn.logsoftmax(tmp(inps[-1]))
#dnn = nn.Module(inps[0:1], [logprobs])
#params = dnn.get_parameters()
# XXX think should just make this part of get_parameters
theta = nn.setup_contiguous_storage(param_list)
# XXX initialize
theta[:] = np.random.uniform(-0.08, 0.08, theta.shape)
# XXX taken from other demo, move
def ind2onehot(inds, n_cls):
out = np.zeros(list(inds.shape)+[n_cls,], cgt.floatX)
for k in xrange(inds.shape[0]):
out[k, inds[k].astype('int32')] = 1
#out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
b_size = 25
def make_loss_and_grad(net):
X_b = inps[0] #cgt.matrix(dtype=cgt.floatX)
y_onehot = cgt.matrix(dtype='i4')
outputs = [logprobs]
loss = nn.crossent(outputs[0], y_onehot) / b_size
#gradloss = cgt.grad(loss, params)
gradloss = cgt.grad(loss, param_list)
# XXX use flatcat function
grad = cgt.concatenate([x.flatten() for x in gradloss])
#grad = gradloss
return cgt.make_function([X_b, y_onehot], [loss, grad, logprobs])
f_loss_and_grad = make_loss_and_grad(None)
# train loop
# shuffle data
perm = np.random.permutation(np.arange(X_train.shape[0]))
X_train = X_train[perm, :]
y_train = y_train[perm]
class Table(object):
pass
state = Table()
state.theta = theta
state.step_size = 0.1
exploss = None
for k in xrange(X_train.shape[0] / b_size):
X_batch, y_batch = X_train[k*b_size:(k+1)*b_size, :], y_train[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
exploss = loss if k == 0 else 0.99*exploss + 0.01*loss
print('iter %d, loss %f, exploss %f' % (k + 1, loss, exploss))
sgd_update(state, grad)
# test code
correct = 0
total = 0
print(X_test.shape)
print(y_test.shape)
for k in xrange(X_test.shape[0] / b_size):
X_batch, y_batch = X_test[k*b_size:(k+1)*b_size, :], y_test[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
preds = logprobs.argmax(axis=1).flatten()
correct = correct + (preds == y_batch).sum()
total = total + b_size
print('%d/%d correct' % (correct, total))
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_feature_select.py | 43 | 26651 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting
# errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
# Test whether k-best and percentiles work with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| mit |
skosukhin/spack | lib/spack/docs/tutorial/examples/PyPackage/0.package.py | 1 | 2422 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install py-pandas
#
# You can edit this file again by typing:
#
# spack edit py-pandas
#
# See the Spack documentation for more information on packaging.
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack import *
class PyPandas(PythonPackage):
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "http://www.example.com"
url = "https://pypi.io/packages/source/p/pandas/pandas-0.19.0.tar.gz"
version('0.19.0', 'bc9bb7188e510b5d44fbdd249698a2c3')
# FIXME: Add dependencies if required.
# depends_on('py-setuptools', type='build')
# depends_on('py-foo', type=('build', 'run'))
def build_args(self, spec, prefix):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args
| lgpl-2.1 |
dieterich-lab/dorina | app/main.py | 1 | 2510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 16:54 19/03/2018
"""
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import layout, widgetbox
from bokeh.models import ColumnDataSource, HoverTool, Div
from bokeh.models.widgets import Slider, Select, TextInput
from bokeh.plotting import figure
from os.path import dirname, join
deseq = pd.read_csv(open(join(dirname(__file__), 'test_deseq_out.csv')))
deseq = deseq.loc[:10000]
deseq = deseq.rename(columns={'Unnamed: 0': 'Gene name'})
axis_map = {
"Mean of normalized counts": "baseMean",
"Fold change": "log2FoldChange",
"Adjusted pvalue": 'padj',
'p-value': 'pvalue'
}
def select_genes():
selected = deseq.copy()
gene_val = gene.value.strip()
selected = selected[
(selected['padj'] < padj.value)
]
if gene_val:
selected = selected[
selected['Gene name'].str.contains(gene_val) == True]
return selected
def update():
df = select_genes()
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
p.title.text = "%d genes selected" % len(df)
p.xaxis.axis_label = x_axis.value
p.yaxis.axis_label = y_axis.value
source.data = dict(
x=df[x_name],
y=df[y_name],
name=df["Gene name"],
)
# Input controls
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()),
value="Adjusted pvalue")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()),
value="Fold change")
gene = TextInput(title="Gene name contains")
padj = Slider(title="Adjusted pval", value=0.05, start=deseq['padj'].min(),
end=deseq['padj'].max(), step=0.05)
source = ColumnDataSource(data=dict(x=[], y=[], name=[]))
hover = HoverTool(tooltips=[
("Counts", "@x"),
("FC", "@y"),
("Name", "@name")
])
p = figure(plot_height=600, plot_width=700, title="", toolbar_location=None,
tools=[hover])
p.circle(x="x", y="y", source=source, size=7, line_color=None)
controls = [gene, x_axis, y_axis, padj]
for control in controls:
control.on_change('value', lambda attr, old, new: update())
sizing_mode = 'fixed' # 'scale_width' also looks nice with this example
inputs = widgetbox(*controls, sizing_mode=sizing_mode)
desc = Div(text=open(join(dirname(__file__), "description.html")).read(),
width=800)
l = layout([
[desc],
[inputs, p],
], sizing_mode=sizing_mode)
update() # initial load of the data
curdoc().add_root(l)
| gpl-3.0 |
marcocaccin/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
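# A small follow-on sketch: the fitted KernelDensity also exposes per-sample
# log-densities via score_samples, which can be used to check how typical
# individual digits are under the learned model. The slice of ten points is
# arbitrary.
log_dens = kde.score_samples(data[:10])
print("log-density of the first 10 projected digits:")
print(np.round(log_dens, 2))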
| bsd-3-clause |
dabura667/electrum | lib/plot.py | 1 | 1704 | from PyQt5.QtGui import *
from electrum.i18n import _
import datetime
from collections import defaultdict
from electrum.util import format_satoshis
from electrum.bitcoin import COIN
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
def plot_history(wallet, history):
hist_in = defaultdict(int)
hist_out = defaultdict(int)
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if not confirmations:
continue
if timestamp is None:
continue
value = value*1./COIN
date = datetime.datetime.fromtimestamp(timestamp)
datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))
if value > 0:
hist_in[datenum] += value
else:
hist_out[datenum] -= value
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
plt.ylabel('BTC')
plt.xlabel('Month')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].set_title('Monthly Volume')
xfmt = md.DateFormatter('%Y-%m')
ax.xaxis.set_major_formatter(xfmt)
width = 20
dates, values = zip(*sorted(hist_in.items()))
r1 = axarr[0].bar(dates, values, width, label='incoming')
axarr[0].legend(loc='upper left')
dates, values = zip(*sorted(hist_out.items()))
r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')
axarr[1].legend(loc='upper left')
return plt
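# A hedged usage sketch: plot_history only reads the per-item tuples, so a
# synthetic history with the (tx_hash, height, confirmations, timestamp,
# value, balance) layout unpacked above is enough to drive it. The wallet
# argument is not used in this function, and the sample values are invented.
def _example_plot_history():
    import time
    now = int(time.time())
    history = [
        ("txid1", 1, 6, now - 90 * 24 * 3600, 2 * COIN, 2 * COIN),
        ("txid2", 2, 6, now - 30 * 24 * 3600, -1 * COIN, 1 * COIN),
    ]
    return plot_history(None, history)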
| mit |
kgullikson88/HET-Scripts | MakeMassRatioDistribution.py | 1 | 9579 | """
This script goes through the stars observed, and searches for both known and new
companions to each target. Right now, it only does known companions automatically
"""
import sys
import os
from matplotlib import rc
import pySIMBAD as sim
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
import matplotlib.pyplot as plt
import numpy as np
import HelperFunctions
import SpectralTypeRelations
from astropy.io import fits as pyfits
import StarData
from astropy import units, constants
"""
The following dictionary shows the new detections from my data.
It includes both brand-new detections, and detections of the secondary
star lines in known SB1s.
The key is the star name, and the value is a list of estimated temperatures,
one for each candidate companion.
"""
NewDetections = {"HIP 67782": [3900, ],
"HIP 77336": [6500, ],
"HIP 88818": [6000, ],
"HIP 85379": [6700, ],
"HIP 72154": [3500, 5700],
"HIP 93393": [3800, ],
"HIP 92312": [5000, ],
"HIP 96840": [3500, 5200],
"HIP 100069": [3200, ],
"HIP 8704": [3500, ],
"HIP 105972": [7600, ],
"HIP 116582": [3200, 6700],
"HIP 2548": [6500, ],
"HIP 17527": [3500, ],
"HIP 97870": [3300, ],
"HIP 13165": [3500, ],
"HIP 14143": [3500, 7300],
"HIP 20430": [5800, ],
"HIP 105282": [3700, 3700],
"HIP 8016": [3500, 3500],
"HIP 14043": [6200, ],
"HIP 58590": [3800, ],
"HIP 82673": [6000, ],
"HIP 87108": [3500, 4400],
"HIP 104139": [5000, ],
"HIP 95241": [4300, ],
"HIP 116247": [3400, ],
"HIP 117452": [4700, ],
"HIP 60009": [3300, 5500],
"HIP 63724": [3400, ],
"HIP 79404": [3800, 6000],
"HIP 92855": [4000, 5800],
"HIP 112029": [6300, ],
"HIP 76600": [5600, ],
"HIP 77516": [3500, ],
"HIP 78820": [4000, ],
"HIP 76267": [6500, ],
"HIP 88816": [6400, ],
"HIP 80883": [3700, ],
"HIP 78554": [3400, ]
}
"""
This function will search the WDS catalog for known companions within 'sep' arcseconds
"""
def GetWDSCompanions(starname, sep=5.0, MS=None):
if MS is None:
MS = SpectralTypeRelations.MainSequence()
companions = HelperFunctions.CheckMultiplicityWDS(starname)
companion_info = []
if companions:
for configuration in companions:
component = companions[configuration]
if component["Separation"] < sep:
s_spt = component["Secondary SpT"]
if s_spt == "Unknown":
print "Warning! Star %s has a companion with unknown magnitude/spectral type in WDS!" % starname
else:
mass = MS.Interpolate(MS.Mass, s_spt)
companion_info.append((component["Separation"], mass))
return companion_info
"""
This function searches the SB9 catalog for spectroscopic companions
The return type is determined by what information is given in the database,
but always consists of an integer followed by a float
-If no companion exists, the return values are 0,0
-If both K1 and K2 are known (double-lined binary):
-integer returned is 1, float returned is the mass ratio
-If only K1 is known (the case for most):
-integer returned is 2, and the float is the mass function f(M2) = M2 sin^3(i) / (1+1/q)^2
"""
def GetSB9Companions(starname, MS=None):
if MS is None:
MS = SpectralTypeRelations.MainSequence()
companion = HelperFunctions.CheckMultiplicitySB9(starname)
if not companion:
return 0, 0
if companion["K1"] != "Unknown" and companion["K2"] != "Unknown":
q = companion["K1"] / companion["K2"]
return 1, q
elif companion["K1"] != "Unknown":
K1 = companion["K1"]
P = companion["Period"]
mass_fcn = (K1 * units.km.to(units.cm)) ** 3 * (P * units.day.to(units.second)) / (
2 * np.pi * constants.G.cgs.value)
return 2, mass_fcn * units.gram.to(units.solMass)
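# A worked illustration of the single-lined branch above, using the same
# f(M2) = P * K1^3 / (2*pi*G) expression; the K1 (km/s) and P (days) values
# here are made up purely for demonstration.
def _example_mass_function(K1=20.0, P=100.0):
    mass_fcn = (K1 * units.km.to(units.cm)) ** 3 * (P * units.day.to(units.second)) / (
        2 * np.pi * constants.G.cgs.value)
    return mass_fcn * units.gram.to(units.solMass)  # mass function in solar masses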
if __name__ == "__main__":
dirlist = []
for arg in sys.argv[1:]:
dirlist.append(arg)
if len(dirlist) == 0:
sys.exit("This function has been obsoleted by the version in School/Research.\nPlease use that one!")
#dirlist = [d for d in os.listdir("./") if d.startswith("2013")]
MS = SpectralTypeRelations.MainSequence()
multiplicity = 0.0
numstars = 0.0
mass_ratios = []
new_massratios = []
for directory in dirlist:
print directory
starlist = [f for f in os.listdir(directory) if
((f.startswith("HR") and not f.startswith("HRS")) or f.startswith("HIP")) and f.endswith("-0.fits")]
for star in starlist:
#First, get the known companions
multiple = False
sb = False
header = pyfits.getheader("%s/%s" % (directory, star))
starname = header['OBJECT'].split()[0].replace("_", " ")
print star, starname
stardata = StarData.GetData(starname)
primary_mass = MS.Interpolate(MS.Mass, stardata.spectype[:2])
known_companions = GetWDSCompanions(starname, MS=MS, sep=100.0)
code, value = GetSB9Companions(starname)
if len(known_companions) > 0:
multiple = True
for comp in known_companions:
print "\tq = %g" % (comp[1] / (primary_mass))
mass_ratios.append(comp[1] / primary_mass)
if code == 1:
sb = True
multiple = True
q = value
wds = False
for comp in known_companions:
if abs(q - comp[1]) < 0.1 and comp[0] < 4.0:
wds = True
if not wds:
mass_ratios.append(q)
else:
print "Spectroscopic binary found which may match a WDS companion."
usr = raw_input("Use both (y or n)? ")
if "y" in usr:
mass_ratios.append(q)
print "Spectroscopic companion with q = %g" % q
elif code == 2:
print "Single-lined spectroscopic companion to %s found! Double-lined in my data?" % starname
multiple = True
#Now, put in my data
if starname in NewDetections:
for T in NewDetections[starname]:
spt = MS.GetSpectralType(MS.Temperature, T)
mass = MS.Interpolate(MS.Mass, spt)
new_q = mass / primary_mass
previously_known = False
for comp in known_companions:
if abs(new_q - comp[1]) < 0.1 and comp[0] < 4.0:
previously_known = True
if sb and abs(new_q - q) < 0.1:
previously_known = True
if not previously_known:
new_massratios.append(new_q)
multiple = True
#Keep track of total binary fraction
if multiple:
multiplicity += 1
numstars += 1.0
#Make some plots
mass_ratios = [min(q, 1.0) for q in mass_ratios]
print "Multiplicity fraction = %g" % (multiplicity / numstars)
bins = np.arange(0.0, 1.1, 0.1)
print bins.size, '\t', bins
plt.figure(1)
if len(new_massratios) > 0:
print "Found new entries!"
mass_ratios = [mass_ratios, new_massratios]
print len(mass_ratios)
#plt.hist(mass_ratios, bins=bins, color=['black','gray'], histtype='barstacked')
plt.hist(mass_ratios, bins=bins, color=['0.25', '0.5'], histtype='barstacked',
label=["Known companions", "Candidate companions"])
plt.legend(loc='best')
#Make error bars
nums = np.zeros(bins.size - 1)
for i in range(len(mass_ratios)):
nums += np.histogram(mass_ratios[i], bins=bins)[0]
lower = []
upper = []
for n in nums:
pl, pu = HelperFunctions.BinomialErrors(n, numstars)
lower.append(pl * np.sqrt(nums.sum()))
upper.append(pu * np.sqrt(nums.sum()))
plt.errorbar(bins[:-1] + 0.05, nums, yerr=[lower, upper], fmt=None, ecolor='0.0', elinewidth=2, capsize=5)
"""
if len(new_massratios) > 0:
y,edges = np.histogram(new_massratios, bins=bins)
print y
print edges
plt.bar(bins[:-1], y, bottom=np.array(height), color='green', align='edge')
#plt.hist(new_massratios, bins=bins, bottom=height, color='green')
"""
plt.xlabel(r"$\rm M_s/M_p$")
plt.ylabel("Number")
plt.title("Mass Ratio Distribution for Companions within 100\"")
#plt.figure(2)
#plt.hist(mass_ratios, bins=bins, color=['gray','green'], cumulative=True, normed=True, histtype='step', linewidth=2, stacked=True)
#plt.plot(bins, bins, 'k--', linewidth=2)
#plt.xlabel(r"$\rm M_s/M_p$")
#plt.ylabel("Cumulative Frequency")
plt.show()
| gpl-3.0 |
HopkinsIDD/EpiForecastStatMech | epi_forecast_stat_mech/datasets/nanhandling.py | 1 | 3953 | # Lint as: python3
"""A few methods to handle NaNs.
Library functions that take a numpy array.
Assumes dimensions are (location, time).
"""
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.ndimage
def _assume_2d(array):
if len(array.shape) != 2:
raise ValueError(
'Functions in this library assume a 2d array with dimensions (location, time).'
)
def fillna_zero(array):
"""Fills all nans with zero."""
return np.nan_to_num(array)
def median_filter(array, filter_days=5):
"""Median filter.
Doesn't guarantee all nans will be filled.
Args:
array: A 2d numpy array with dimensions (location, time)
filter_days: An odd integer >= 1, size of the filter.
Returns:
A numpy array.
"""
_assume_2d(array)
return scipy.ndimage.generic_filter(
array, np.nanmedian, size=(1, filter_days), mode='nearest')
def mean_filter(array, filter_days=5):
"""Mean filter.
Doesn't guarantee all nans will be filled.
Args:
array: A 2d numpy array with dimensions (location, time)
filter_days: An odd integer >= 1, size of the filter.
Returns:
A numpy array.
"""
_assume_2d(array)
return scipy.ndimage.generic_filter(
array, np.nanmean, size=(1, filter_days), mode='nearest')
def fillna_beginning(array, value=0):
"""Fills consecutive NaNs at the beginning of an array.
Args:
array: A 2d numpy array with dimensions (location, time)
value: Float, the value to fill.
Returns:
A numpy array.
"""
def fillna_beginning_1d(array):
array = array.copy()
num_initial_nans = 0
while np.isnan(array[num_initial_nans]):
num_initial_nans += 1
if num_initial_nans == len(array):
break
array[:num_initial_nans] = value
return array
_assume_2d(array)
return np.apply_along_axis(fillna_beginning_1d, 1, array)
def fillna_ffill(array):
"""Forward fills an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
_assume_2d(array)
mask = np.isnan(array)
idx = np.where(~mask, np.arange(mask.shape[1]), 0)
max_idx = np.maximum.accumulate(idx, axis=1)
result = array[np.arange(max_idx.shape[0])[:, None], max_idx]
return result
def fillna_bfill(array):
"""Backward fills an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
_assume_2d(array)
flipped = np.flip(array)
filled = fillna_ffill(flipped)
return np.flip(filled)
def fillna_interp(array):
"""Applies 1-d linear interpolation.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
def interp_1d(array):
indices = np.arange(array.shape[0])
values = np.where(np.isfinite(array))
# Interp requires at least 2 non-nan entries
if len(values[0]) < 2:
return array
f = scipy.interpolate.interp1d(
indices[values], array[values], bounds_error=False)
return np.where(np.isfinite(array), array, f(indices))
_assume_2d(array)
return np.apply_along_axis(interp_1d, 1, array)
def plot_nans(array, title, ax=None):
"""Plots nans."""
if ax is None:
plt.figure()
ax = plt.gca()
ax.imshow(np.isnan(array), interpolation='nearest')
ax.set_xlabel('time')
ax.set_ylabel('location')
ax.set_title(title)
def longest_nans(array):
"""Returns the longest consecutive nans in an array.
Args:
array: A 2d numpy array with dimensions (location, time)
Returns:
A numpy array.
"""
def longest_nans_1d(array):
mask = np.isnan(array)
grouped = [(el, sum(1
for element in group))
for el, group in itertools.groupby(mask)]
nan_groups = [g[1] for g in grouped if g[0] == 1]
if not nan_groups:
return 0
return max(nan_groups)
_assume_2d(array)
return np.apply_along_axis(longest_nans_1d, 1, array)
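# A minimal usage sketch for the fillers above on a toy (location, time)
# array; the example values and the interp -> ffill -> bfill ordering are
# arbitrary choices, not a prescribed pipeline.
def _example_fill_pipeline():
  array = np.array([[np.nan, 1.0, np.nan, 3.0],
                    [2.0, np.nan, np.nan, 5.0]])
  filled = fillna_interp(array)  # linear interpolation between observed points
  filled = fillna_ffill(filled)  # carry the last observation forward
  filled = fillna_bfill(filled)  # back-fill any NaNs left at the start
  return filled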
| gpl-3.0 |
saguziel/incubator-airflow | scripts/perf/scheduler_ops_metrics.py | 30 | 6536 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time taken from starting the executor to the task
instance to be added to the executor queue.
2. Start delay - time taken from starting the executor to the task instance
to start execution.
3. Land time - time taken from starting the executor to task instance
completion.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = filter(lambda x: x.state == State.SUCCESS, tis)
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
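        # with a daily schedule interval, each task is expected to produce one
        # task instance per elapsed day since that task's start_date, so this
        # sum is the total number of task instances the test should create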
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
        session.delete(dr)
    session.commit()
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
f3r/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
    # mean_absolute_error and mean_squared_error are equal here because every
    # per-element error is either 0 or 1, so |e| == e**2.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
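    # Worked check of the two R^2 values above (per column, R^2 = 1 - SS_res / SS_tot):
    #   column 0: SS_res = 3, SS_tot = 2/3 -> R^2 = -3.5
    #   column 1: SS_res = 2, SS_tot = 2/3 -> R^2 = -2.0
    #   column 2: SS_res = 0, SS_tot = 2/3 -> R^2 = 1.0
    #   column 3: SS_res = 0, SS_tot = 0   -> score defined as 1.0
    # 'variance_weighted' pools the sums: 1 - (3 + 2) / (3 * 2/3) = 1 - 5/2,
    # while 'uniform_average' averages the columns: (-3.5 - 2 + 1 + 1) / 4 = -0.875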
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
    # mean_absolute_error and mean_squared_error are equal here because every
    # per-element error is exactly 1, so |e| == e**2.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
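    # worked check: the raw per-output errors for this data are
    # mse = [0.125, 0.5625] and mae = [0.25, 0.625] (see
    # test_regression_multioutput_array above), so the [0.4, 0.6]-weighted
    # averages are 0.4 * 0.125 + 0.6 * 0.5625 = 0.3875 and
    # 0.4 * 0.25 + 0.6 * 0.625 = 0.475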
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
davidgbe/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened image data directly (relative
# pixel position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
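# each principal component is reshaped back to the image dimensions so that it
# can be visualised later as an "eigenface" in the gallery at the bottom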
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane obtained with automatic correction
for the unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
    of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    like that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create two clusters of separable points, 1000 and 100 samples respectively
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
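# the decision boundary satisfies w[0]*x + w[1]*y + intercept = 0, so solving
# for y gives the line y = -(w[0] / w[1]) * x - intercept / w[1] plotted here
# (the same construction is repeated below for the class-weighted model)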
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
stevenzhang18/Indeed-Flask | lib/pandas/stats/tests/test_moments.py | 9 | 86813 | import nose
import sys
import functools
import warnings
from datetime import datetime
from numpy.random import randn
from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull, concat
from pandas.util.testing import (
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal
)
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3, StringIO
N, K = 100, 10
class Base(tm.TestCase):
_multiprocess_can_split_ = True
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestMoments(Base):
def setUp(self):
self._create_data()
def test_centered_axis_validation(self):
# ok
mom.rolling_mean(Series(np.ones(10)),3,center=True ,axis=0)
# bad axis
self.assertRaises(ValueError, mom.rolling_mean,Series(np.ones(10)),3,center=True ,axis=1)
# ok ok
mom.rolling_mean(DataFrame(np.ones((10,10))),3,center=True ,axis=0)
mom.rolling_mean(DataFrame(np.ones((10,10))),3,center=True ,axis=1)
# bad axis
self.assertRaises(ValueError, mom.rolling_mean,DataFrame(np.ones((10,10))),3,center=True ,axis=2)
def test_rolling_sum(self):
self._check_moment_func(mom.rolling_sum, np.sum)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(mom.rolling_count, counter,
has_min_periods=False,
preserve_nan=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean)
def test_cmov_mean(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_mean(vals, 5, center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_mean(Series(vals), 5, center=True)
assert_series_equal(xp, rs)
def test_cmov_window(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
# GH 8238
tm._skip_if_no_scipy()
# all nan
vals = np.empty(10, dtype=float)
vals.fill(np.nan)
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertTrue(np.isnan(rs).all())
# empty
vals = np.array([])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertEqual(len(rs), 0)
# shorter than window
vals = np.random.randn(5)
rs = mom.rolling_window(vals, 10, 'boxcar')
self.assertTrue(np.isnan(rs).all())
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
# Gh 8238
tm._skip_if_no_scipy()
vals = np.array([[ 12.18, 3.64],
[ 10.18, 9.16],
[ 13.24, 14.61],
[ 4.51, 8.11],
[ 6.15, 11.44],
[ 9.14, 6.21],
[ 11.31, 10.67],
[ 2.94, 6.51],
[ 9.42, 8.39],
[ 12.44, 7.34 ]])
xp = np.array([[ np.nan, np.nan],
[ np.nan, np.nan],
[ 9.252, 9.392],
[ 8.644, 9.906],
[ 8.87 , 10.208],
[ 6.81 , 8.588],
[ 7.792, 8.644],
[ 9.05 , 7.824],
[ np.nan, np.nan],
[ np.nan, np.nan]])
# DataFrame
rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = mom.rolling_mean(vals, 5, min_periods=4, center=True)
rs = mom.rolling_window(vals, 5, 'boxcar', min_periods=4, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
14.03687, 13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
13.33889, 13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
14.17267, 14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
14.03559, 15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
14.20036, 14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan]}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt in win_types:
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_missing_data(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, np.nan, 10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
9.17869, 12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
9.1764, 12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
10.34667, 12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098,
13.65509]
}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
assert_series_equal(xp, rs)
def test_cmov_window_special(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
13.89053, 13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
11.73161, 13.08516, 12.95111, 12.74577,
np.nan, np.nan],
'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
12.88331, 12.96079, 12.77008, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
12.75129, 12.90702, 12.83757, np.nan, np.nan]
}
for wt, k in zip(win_types, kwds):
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_cmov_window_special_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt, k in zip(win_types, kwds):
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(mom.rolling_median, np.median)
def test_rolling_min(self):
self._check_moment_func(mom.rolling_min, np.min)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_min(a, window=100, min_periods=1)
assert_almost_equal(b, np.ones(len(a)))
self.assertRaises(ValueError, mom.rolling_min, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_max(self):
self._check_moment_func(mom.rolling_max, np.max)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_max(a, window=100, min_periods=1)
assert_almost_equal(a, b)
self.assertRaises(ValueError, mom.rolling_max, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_quantile(self):
qs = [.1, .5, .9]
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = per / 1. * (values.shape[0] - 1)
return values[int(idx)]
for q in qs:
def f(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_quantile(x, window, q,
min_periods=min_periods,
freq=freq,
center=center)
def alt(x):
return scoreatpercentile(x, q)
self._check_moment_func(f, alt)
def test_rolling_apply(self):
# suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
ser = Series([])
assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
def roll_mean(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_apply(x, window,
lambda x: x[np.isfinite(x)].mean(),
min_periods=min_periods,
freq=freq,
center=center)
self._check_moment_func(roll_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 2.])
assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
assert_almost_equal(result, result)
def test_rolling_std(self):
self._check_moment_func(mom.rolling_std,
lambda x: np.std(x, ddof=1))
self._check_moment_func(functools.partial(mom.rolling_std, ddof=0),
lambda x: np.std(x, ddof=0))
def test_rolling_std_1obs(self):
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1)
expected = np.array([np.nan] * 5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1, ddof=0)
expected = np.zeros(5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
3, min_periods=2)
self.assertTrue(np.isnan(result[2]))
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = np.array([0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767])
b = mom.rolling_std(a, window=3)
self.assertTrue(np.isfinite(b[2:]).all())
b = mom.ewmstd(a, span=3)
self.assertTrue(np.isfinite(b[2:]).all())
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var,
lambda x: np.var(x, ddof=1),
test_stable=True)
self._check_moment_func(functools.partial(mom.rolling_var, ddof=0),
lambda x: np.var(x, ddof=0))
def test_rolling_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_skew,
lambda x: skew(x, bias=False))
def test_rolling_kurt(self):
try:
from scipy.stats import kurtosis
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_kurt,
lambda x: kurtosis(x, bias=False))
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
arr = np.frombuffer(data, dtype='<f8')
if sys.byteorder != "little":
arr = arr.byteswap().newbyteorder()
result = mom.rolling_sum(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_mean(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_var(arr, 2)
self.assertTrue((result[1:] >= 0).all())
# #2527, ugh
arr = np.array([0.00012456, 0.0003, 0])
result = mom.rolling_mean(arr, 1)
self.assertTrue(result[-1] >= 0)
result = mom.rolling_mean(-arr, 1)
self.assertTrue(result[-1] <= 0)
def _check_moment_func(self, func, static_comp, window=50,
has_min_periods=True,
has_center=True,
has_time_rule=True,
preserve_nan=True,
fill_value=None,
test_stable=False):
self._check_ndarray(func, static_comp, window=window,
has_min_periods=has_min_periods,
preserve_nan=preserve_nan,
has_center=has_center,
fill_value=fill_value,
test_stable=test_stable)
self._check_structures(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
fill_value=fill_value,
has_center=has_center)
def _check_ndarray(self, func, static_comp, window=50,
has_min_periods=True,
preserve_nan=True,
has_center=True,
fill_value=None,
test_stable=False,
test_window=True):
result = func(self.arr, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
if has_min_periods:
result = func(arr, 50, min_periods=30)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# min_periods is working correctly
result = func(arr, 20, min_periods=15)
self.assertTrue(np.isnan(result[23]))
self.assertFalse(np.isnan(result[24]))
self.assertFalse(np.isnan(result[-6]))
self.assertTrue(np.isnan(result[-5]))
arr2 = randn(20)
result = func(arr2, 10, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, 20, min_periods=0)
result1 = func(arr, 20, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr, 50)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# GH 7925
if has_center:
if has_min_periods:
result = func(arr, 20, min_periods=15, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
else:
result = func(arr, 20, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
self.assert_numpy_array_equal(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:] + 1e9))
# Test window larger than array, #7297
if test_window:
if has_min_periods:
for minp in (0, len(self.arr)-1, len(self.arr)):
result = func(self.arr, len(self.arr)+1, min_periods=minp)
expected = func(self.arr, len(self.arr), min_periods=minp)
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask,
np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = func(self.arr, len(self.arr)+1)
expected = func(self.arr, len(self.arr))
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
def _check_structures(self, func, static_comp,
has_min_periods=True, has_time_rule=True,
has_center=True,
fill_value=None):
series_result = func(self.series, 50)
tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame, 50)
self.assertEqual(type(frame_result), DataFrame)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
if has_min_periods:
series_result = func(self.series[::2], win, min_periods=minp,
freq='B')
frame_result = func(self.frame[::2], win, min_periods=minp,
freq='B')
else:
series_result = func(self.series[::2], win, freq='B')
frame_result = func(self.frame[::2], win, freq='B')
last_date = series_result.index[-1]
prev_date = last_date - 24 * datetools.bday
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
assert_almost_equal(series_result[-1], static_comp(trunc_series))
assert_almost_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp))
# GH 7925
if has_center:
if has_min_periods:
minp = 10
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, min_periods=minp,
center=True)
frame_rs = func(self.frame, 25, min_periods=minp,
center=True)
else:
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, center=True)
frame_rs = func(self.frame, 25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
assert_series_equal(series_xp, series_rs)
assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(mom.ewma)
arr = np.zeros(1000)
arr[5] = 1
result = mom.ewma(arr, span=100, adjust=False).sum()
self.assertTrue(np.abs(result - 1) < 1e-2)
s = Series([1.0, 2.0, 4.0, 8.0])
expected = Series([1.0, 1.6, 2.736842, 4.923077])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=True),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [1.] * len(s))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [np.nan] * 2 + [1.] * 4)
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
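        # simple_wma is a cumulative weighted mean: at each point it divides
        # the running weighted sum by the running sum of weights, forward-
        # filling over NaNs. The w vectors below spell out the effective EWMA
        # weight of each observation for every (adjust, ignore_na) case, so
        # ewma() can be checked against this explicit reference.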
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
(s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = mom.ewma(s, com=com, adjust=adjust)
assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(mom.ewmvar)
def test_ewmvol(self):
self._check_ew(mom.ewmvol)
def test_ewma_span_com_args(self):
A = mom.ewma(self.arr, com=9.5)
B = mom.ewma(self.arr, span=20)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ewma_halflife_arg(self):
A = mom.ewma(self.arr, com=13.932726172912965)
B = mom.ewma(self.arr, halflife=10.0)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_moment_preserve_series_name(self):
# GH 10565
s = Series(np.arange(100), name='foo')
s2 = mom.rolling_mean(s, 30)
s3 = mom.rolling_sum(s, 20)
self.assertEqual(s2.name, 'foo')
self.assertEqual(s3.name, 'foo')
def test_ew_empty_arrays(self):
arr = np.array([], dtype=np.float64)
funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]
for f in funcs:
result = f(arr, 3)
assert_almost_equal(result, arr)
def _check_ew(self, func):
self._check_ew_ndarray(func)
self._check_ew_structures(func)
def _check_ew_ndarray(self, func, preserve_nan=False):
result = func(self.arr, com=10)
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), 50, min_periods=min_periods)
if func == mom.ewma:
assert_series_equal(result, Series([1.]))
else:
# ewmstd, ewmvol, ewmvar with bias=False require at least two values
assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
self.assertEqual(result2.dtype, np.float_)
def _check_ew_structures(self, func):
series_result = func(self.series, com=10)
tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel()
return len(set(values[notnull(values)])) == 1
def no_nans(x):
return x.notnull().all().all()
    # each element of data is a tuple (object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [ (x, is_constant(x), no_nans(x)) for x in data ]
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
base_functions = [
(lambda v: Series(v).count(), None, 'count'),
(lambda v: Series(v).max(), None, 'max'),
(lambda v: Series(v).min(), None, 'min'),
(lambda v: Series(v).sum(), None, 'sum'),
(lambda v: Series(v).mean(), None, 'mean'),
(lambda v: Series(v).std(), 1, 'std'),
(lambda v: Series(v).cov(Series(v)), None, 'cov'),
(lambda v: Series(v).corr(Series(v)), None, 'corr'),
(lambda v: Series(v).var(), 1, 'var'),
#(lambda v: Series(v).skew(), 3, 'skew'), # restore once GH 8086 is fixed
#(lambda v: Series(v).kurt(), 4, 'kurt'), # restore once GH 8086 is fixed
#(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods, 'quantile'),
# lambda v: Series(v).quantile(0.3), None, 'quantile'), # restore once GH 8084 is fixed
(lambda v: Series(v).median(), None ,'median'),
(np.nanmax, 1, 'max'),
(np.nanmin, 1, 'min'),
(np.nansum, 1, 'sum'),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(np.nanmean, 1, 'mean'),
(lambda v: np.nanstd(v, ddof=1), 1 ,'std'),
(lambda v: np.nanvar(v, ddof=1), 1 ,'var'),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(np.nanmedian, 1, 'median'),
]
no_nan_functions = [
(np.max, None, 'max'),
(np.min, None, 'min'),
(np.sum, None, 'sum'),
(np.mean, None, 'mean'),
(lambda v: np.std(v, ddof=1), 1 ,'std'),
(lambda v: np.var(v, ddof=1), 1 ,'var'),
(np.median, None, 'median'),
]
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setUp(self):
self._create_data()
def _test_moments_consistency(self,
min_periods,
count, mean, mock_mean, corr,
var_unbiased=None, std_unbiased=None, cov_unbiased=None,
var_biased=None, std_biased=None, cov_biased=None,
var_debiasing_factors=None):
def _non_null_values(x):
values = x.values.ravel()
return set(values[notnull(values)].tolist())
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected.astype('float64'))
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
self.assertFalse((var_x < 0).any().any())
self.assertFalse((std_x < 0).any().any())
if cov:
cov_x_x = cov(x, x)
self.assertFalse((cov_x_x < 0).any().any())
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
self.assertFalse((var_x > 0).any().any())
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isnull().equals(y.isnull()):
# can only easily test two Series with similar structure
continue
# check that cor(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
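                            # (this identity follows from expanding
                            # var(x + y) = var(x) + var(y) + 2 * cov(x, y))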
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) * std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@slow
def test_ewm_consistency(self):
def _weights(s, com, adjust, ignore_na):
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([ _weights(s.iloc[:, i],
com=com,
adjust=adjust,
ignore_na=ignore_na) for i, _ in enumerate(s.columns) ],
axis=1)
w.index=s.index
w.columns=s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
def _variance_debiasing_factors(s, com, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
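        # sum(w)**2 / (sum(w)**2 - sum(w**2)) is the standard bias-correction
        # factor for a variance computed with reliability weights; the
        # consistency test asserts ewmvar(bias=False) == ewmvar(bias=True)
        # times this factor.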
def _ewma(s, com, min_periods, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
return result
com = 3.
for min_periods in [0, 1, 2, 3, 4]:
for adjust in [True, False]:
for ignore_na in [False, True]:
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
@slow
def test_expanding_consistency(self):
# suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
for min_periods in [0, 1, 2, 3, 4]:
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
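                # the debiasing factor expanding_count / (expanding_count - 1)
                # is just the usual n / (n - 1) ddof=1 correction, with n == 1
                # mapped to NaN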
# test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
# or (b) expanding_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
expanding_f = getattr(mom,'expanding_{0}'.format(name))
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if expanding_f is mom.expanding_count:
expanding_f_result = expanding_f(x)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
else:
if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
else:
expanding_f_result = expanding_f(x, min_periods=min_periods)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
if not tm._incompat_bottleneck_version(name):
assert_equal(expanding_f_result, expanding_apply_f_result)
if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
# test pairwise=True
expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
for center in [False, True]:
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: mom.rolling_count(x, window=window, center=center),
mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
mom.rolling_count(x, window=window, center=center)),
corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
# test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
# or (b) rolling_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(mom,'rolling_{0}'.format(name))
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if rolling_f is mom.rolling_count:
rolling_f_result = rolling_f(x, window=window, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=0, center=center)
else:
if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
else:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=min_periods, center=center)
if not tm._incompat_bottleneck_version(name):
assert_equal(rolling_f_result, rolling_apply_f_result)
if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
# test pairwise=True
rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
center=center, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
window=window, min_periods=min_periods, center=center)
assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_cov(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment(mom.rolling_cov, 10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_corr(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = mom.rolling_corr(a, b, len(a), min_periods=1)
assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment(mom.rolling_corr, 10, min_periods=5)
def _check_pairwise_moment(self, func, *args, **kwargs):
panel = func(self.frame, *args, **kwargs)
actual = panel.ix[:, 1, 5]
expected = func(self.frame[1], self.frame[5], *args, **kwargs)
tm.assert_series_equal(actual, expected, check_names=False)
self.assertEqual(actual.name, 5)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
self.assertRaises(TypeError, mom._flex_binary_moment,5,6,None)
def test_corr_sanity(self):
#GH 3155
df = DataFrame(
np.array(
[[ 0.87024726, 0.18505595],
[ 0.64355431, 0.3091617 ],
[ 0.92372966, 0.50552513],
[ 0.00203756, 0.04520709],
[ 0.84780328, 0.33394331],
[ 0.78369152, 0.63919667]])
)
res = mom.rolling_corr(df[0],df[1],5,center=True)
self.assertTrue(all([np.abs(np.nan_to_num(x)) <=1 for x in res]))
# and some fuzzing
for i in range(10):
df = DataFrame(np.random.rand(30,2))
res = mom.rolling_corr(df[0],df[1],5,center=True)
try:
self.assertTrue(all([np.abs(np.nan_to_num(x)) <=1 for x in res]))
except:
print(res)
def test_flex_binary_frame(self):
def _check(method):
series = self.frame[1]
res = method(series, self.frame, 10)
res2 = method(self.frame, series, 10)
exp = self.frame.apply(lambda x: method(series, x, 10))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = method(self.frame, frame2, 10)
exp = DataFrame(dict((k, method(self.frame[k], frame2[k], 10))
for k in self.frame))
tm.assert_frame_equal(res3, exp)
methods = [mom.rolling_corr, mom.rolling_cov]
for meth in methods:
_check(meth)
def test_ewmcov(self):
self._check_binary_ew(mom.ewmcov)
def test_ewmcov_pairwise(self):
self._check_pairwise_moment(mom.ewmcov, span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew(mom.ewmcorr)
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment(mom.ewmcorr, span=10, min_periods=5)
def _check_binary_ew(self, func):
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
self.assertTrue(np.isnan(result.values[:14]).all())
self.assertFalse(np.isnan(result.values[14:]).any())
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply(self):
ser = Series([])
assert_series_equal(ser, mom.expanding_apply(ser, lambda x: x.mean()))
def expanding_mean(x, min_periods=1, freq=None):
return mom.expanding_apply(x,
lambda x: x.mean(),
min_periods=min_periods,
freq=freq)
self._check_expanding(expanding_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 3.])
assert_series_equal(result, expected)
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = mom.expanding_apply(df, np.mean) + 20.
assert_frame_equal(mom.expanding_apply(df, mean_w_arg, args=(20,)),
expected)
assert_frame_equal(mom.expanding_apply(df, mean_w_arg,
kwargs={'const' : 20}),
expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = mom.expanding_corr(A, B)
rolling_result = mom.rolling_corr(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = mom.expanding_count(self.series)
assert_almost_equal(result, mom.rolling_count(self.series,
len(self.series)))
def test_expanding_quantile(self):
result = mom.expanding_quantile(self.series, 0.5)
rolling_result = mom.rolling_quantile(self.series,
len(self.series),
0.5, min_periods=1)
assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = mom.expanding_cov(A, B)
rolling_result = mom.rolling_cov(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_max(self):
self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
result = mom.expanding_cov(self.frame)
rolling_result = mom.rolling_cov(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_corr_pairwise(self):
result = mom.expanding_corr(self.frame)
rolling_result = mom.rolling_corr(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_cov(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, None, 4.5])
assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_corr(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, None, 1.])
assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_cov(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_cov(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_corr(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_corr(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_functions_window_non_shrinkage(self):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1,5], [3, 2], [3,9], [-1,0]], columns=['A','B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
df_expected_panel = Panel(items=df.index, major_axis=df.columns, minor_axis=df.columns)
functions = [lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df_result = f(df)
assert_frame_equal(df_result, df_expected)
            except ImportError:
# scipy needed for rolling_window
continue
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
assert_panel_equal(df_result_panel, df_expected_panel)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
functions = [lambda x: mom.expanding_count(x),
lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_max(x, min_periods=5),
lambda x: mom.expanding_min(x, min_periods=5),
lambda x: mom.expanding_sum(x, min_periods=5),
lambda x: mom.expanding_mean(x, min_periods=5),
lambda x: mom.expanding_std(x, min_periods=5),
lambda x: mom.expanding_var(x, min_periods=5),
lambda x: mom.expanding_skew(x, min_periods=5),
lambda x: mom.expanding_kurt(x, min_periods=5),
lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
lambda x: mom.expanding_median(x, min_periods=5),
lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
lambda x: mom.rolling_count(x, window=10),
lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df1_result = f(df1)
assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
assert_frame_equal(df2_result, df2_expected)
            except ImportError:
# scipy needed for rolling_window
continue
functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
assert_panel_equal(df1_result_panel, df1_expected_panel)
df2_result_panel = f(df2)
assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B'])
df1a = DataFrame([[1,5], [3,9]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_cov(df1, df2, pairwise=True)[2]
result2 = mom.expanding_cov(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_cov(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_cov(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,2], [3, 2], [3,4]], columns=['A','B'])
df1a = DataFrame([[1,2], [3,4]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_corr(df1, df2, pairwise=True)[2]
result2 = mom.expanding_corr(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_corr(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_corr(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_pairwise_stats_column_names_order(self):
# GH 7738
df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]),
DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]),
DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]),
DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']),
]
df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
s = Series([1,1,3,8])
# suppress warnings about incomparable objects, as we are deliberately testing with such column labels
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
# DataFrame methods (which do not call _flex_binary_moment())
for f in [lambda x: x.cov(),
lambda x: x.corr(),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.columns)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equal(result, results[0])
# DataFrame with itself, pairwise=True
for f in [lambda x: mom.expanding_cov(x, pairwise=True),
lambda x: mom.expanding_corr(x, pairwise=True),
lambda x: mom.rolling_cov(x, window=3, pairwise=True),
lambda x: mom.rolling_corr(x, window=3, pairwise=True),
lambda x: mom.ewmcov(x, com=3, pairwise=True),
lambda x: mom.ewmcorr(x, com=3, pairwise=True),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equal(result, results[0])
# DataFrame with itself, pairwise=False
for f in [lambda x: mom.expanding_cov(x, pairwise=False),
lambda x: mom.expanding_corr(x, pairwise=False),
lambda x: mom.rolling_cov(x, window=3, pairwise=False),
lambda x: mom.rolling_corr(x, window=3, pairwise=False),
lambda x: mom.ewmcov(x, com=3, pairwise=False),
lambda x: mom.ewmcorr(x, com=3, pairwise=False),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equal(result, results[0])
# DataFrame with another DataFrame, pairwise=True
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
lambda x, y: mom.expanding_corr(x, y, pairwise=True),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
]:
results = [f(df, df2) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df2.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equal(result, results[0])
# DataFrame with another DataFrame, pairwise=False
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
lambda x, y: mom.expanding_corr(x, y, pairwise=False),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
]:
results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
for (df, result) in zip(df1s, results):
if result is not None:
expected_index = df.index.union(df2.index)
expected_columns = df.columns.union(df2.columns)
assert_index_equal(result.index, expected_index)
assert_index_equal(result.columns, expected_columns)
else:
tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
# DataFrame with a Series
for f in [lambda x, y: mom.expanding_cov(x, y),
lambda x, y: mom.expanding_corr(x, y),
lambda x, y: mom.rolling_cov(x, y, window=3),
lambda x, y: mom.rolling_corr(x, y, window=3),
lambda x, y: mom.ewmcov(x, y, com=3),
lambda x, y: mom.ewmcorr(x, y, com=3),
]:
results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equal(result, results[0])
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_skew(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_skew(d, window=2)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
0.177994, 1.548824])
x = mom.rolling_skew(d, window=4)
assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_kurt(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_kurt(d, window=3)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
1.224307, 2.671499])
x = mom.rolling_kurt(d, window=4)
assert_series_equal(expected, x)
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
assert_almost_equal(result[10],
static_comp(self.arr[:11]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
arr = randn(50)
if has_min_periods:
result = func(arr, min_periods=30)
assert(np.isnan(result[:29]).all())
assert_almost_equal(result[-1], static_comp(arr[:50]))
# min_periods is working correctly
result = func(arr, min_periods=15)
self.assertTrue(np.isnan(result[13]))
self.assertFalse(np.isnan(result[14]))
arr2 = randn(20)
result = func(arr2, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, min_periods=0)
result1 = func(arr, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr)
assert_almost_equal(result[-1], static_comp(arr[:50]))
def _check_expanding_structures(self, func):
series_result = func(self.series)
tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame)
self.assertEqual(type(frame_result), DataFrame)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True,
preserve_nan=True):
self._check_expanding_ndarray(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
preserve_nan=preserve_nan)
self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_max_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='median')
assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0+10.0+20.0)/3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='mean')
assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_min(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_median_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_median(series, window=1, freq='D')
assert_series_equal(expected, x)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
johndpope/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 5 | 55283 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
    ValueError: If both `(x, y)` and `input_fn` are provided, or if neither is.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
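# Hypothetical usage sketch (not part of the original source): given in-memory
# numpy arrays, _get_input_fn wraps them in a DataFeeder and hands back an
# input builder plus a feed function. The array names below are assumptions.
#
#   import numpy as np
#   x_train = np.random.rand(100, 3).astype(np.float32)
#   y_train = np.random.rand(100).astype(np.float32)
#   input_fn, feed_fn = _get_input_fn(
#       x=x_train, y=y_train, input_fn=None, feed_fn=None,
#       batch_size=32, shuffle=True, epochs=1)
#   features, labels = input_fn()  # placeholders, fed via feed_fn() at run time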
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
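# Hypothetical usage sketch (not part of the original source): `my_input_fn`
# below is an assumed user-defined input function; the helper builds a
# throwaway graph just to inspect the feature tensors it returns.
#
#   import tensorflow as tf
#   def my_input_fn():
#     features = {'age': tf.constant([[25.0], [40.0]])}
#     labels = tf.constant([[0.0], [1.0]])
#     return features, labels
#
#   feature_columns = infer_real_valued_columns_from_input_fn(my_input_fn)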
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
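# Hypothetical usage sketch (not part of the original source): any dense float
# matrix (or an iterator of such arrays) works; the resulting columns can then
# be passed on to an estimator that accepts feature columns.
#
#   import numpy as np
#   x = np.random.rand(100, 4).astype(np.float32)
#   feature_columns = infer_real_valued_columns_from_input(x)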
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
    ValueError: If the partial function has positionally bound arguments.
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
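# Illustrative sketch (not part of the original source) of how _model_fn_args
# treats functools.partial objects: keyword-bound arguments are dropped from
# the reported argument names. `my_model_fn` is an assumed example function.
#
#   import functools
#   def my_model_fn(features, labels, mode, params):
#     pass
#
#   _model_fn_args(my_model_fn)
#   # -> ('features', 'labels', 'mode', 'params')
#   _model_fn_args(functools.partial(my_model_fn, params={'lr': 0.1}))
#   # -> ('features', 'labels', 'mode')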
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
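# Illustrative sketch (not part of the original source): when the RunConfig
# describes parameter servers, variable ops created under the returned device
# function are pinned to the ps job while other ops stay on the worker device.
# `config` below is an assumed RunConfig instance.
#
#   setter = _get_replica_device_setter(config)
#   with ops.Graph().as_default(), ops.device(setter):
#     v = variables.Variable([0.0])  # on /job:ps/... when num_ps_replicas > 0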
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the corresponding metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Most commonly, a dict of
      predictions is given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
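# Hypothetical usage sketch (not part of the original source): the recommended
# MetricSpec form names the metric function and the prediction key it consumes.
# `features`, `labels` and `predictions` below are assumed tensors/dicts.
#
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(
#           metric_fn=metrics_lib.streaming_accuracy,
#           prediction_key='classes'),
#   }
#   eval_ops = _make_metrics_ops(metrics, features, labels, predictions)
#   # eval_ops['accuracy'] is the (value_op, update_op) pair returned by the
#   # streaming metric.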
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be the
        same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been '
                       'reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
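  # Hypothetical usage sketch (not part of the original source): `est` and
  # `train_input_fn` below are assumed to exist.
  #
  #   est.fit(input_fn=train_input_fn, steps=1000)       # train 1000 more steps
  #   est.fit(input_fn=train_input_fn, max_steps=10000)  # or up to global step 10000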
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. It can be used to
    implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at once, or when the model takes a long time to
    converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
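  # Hypothetical usage sketch (not part of the original source): out-of-core
  # training over data chunks; `chunk_iterator` is an assumed generator of
  # (x_chunk, y_chunk) numpy arrays.
  #
  #   for x_chunk, y_chunk in chunk_iterator():
  #     est.partial_fit(x=x_chunk, y=y_chunk, steps=10)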
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
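  # Hypothetical usage sketch (not part of the original source): `eval_input_fn`
  # is an assumed evaluation input function; the metrics dict overrides the
  # model's default metrics.
  #
  #   results = est.evaluate(
  #       input_fn=eval_input_fn, steps=100,
  #       metrics={'accuracy': metric_spec.MetricSpec(
  #           metric_fn=metrics_lib.streaming_accuracy,
  #           prediction_key='classes')})
  #   print(results['accuracy'], results['global_step'])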
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
      input_fn: Input function. If set, `x` and `batch_size` must be `None`.
      batch_size: Override default batch size. If set, `input_fn` must be
        `None`.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
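  # Hypothetical usage sketch (not part of the original source): with
  # as_iterable=True predictions stream one example at a time, so the input
  # must terminate; `predict_input_fn` and `handle` are assumptions.
  #
  #   for pred in est.predict(input_fn=predict_input_fn, as_iterable=True):
  #     handle(pred)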
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
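  # Illustrative sketch (not part of the original source): inspecting trained
  # weights by name once a checkpoint exists in model_dir.
  #
  #   for name in est.get_variable_names():
  #     print(name, est.get_variable_value(name).shape)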
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
          * `model_dir`: Optional directory where model parameters, graph, etc.
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
        Also supports a legacy signature which returns a tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
      params: `dict` of hyperparameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not include a '
                         'params argument, but params (%s) are passed.' %
                         (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
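  # Constructor usage, as a hedged sketch: a minimal model_fn following the
  # (features, labels, mode, params) signature documented above and returning
  # a ModelFnOps. It assumes `import tensorflow as tf` on the caller's side;
  # the 'x' feature key, the learning_rate param and the model_dir path are
  # hypothetical placeholders.
  #
  #   def my_model_fn(features, labels, mode, params):
  #     predictions = tf.layers.dense(features['x'], 1)
  #     loss = tf.reduce_mean(tf.square(predictions - labels))
  #     train_op = tf.train.GradientDescentOptimizer(
  #         params['learning_rate']).minimize(
  #             loss, global_step=tf.train.get_global_step())
  #     return model_fn_lib.ModelFnOps(
  #         mode=mode, predictions=predictions, loss=loss, train_op=train_op)
  #
  #   estimator = Estimator(model_fn=my_model_fn,
  #                         model_dir='/tmp/my_model',
  #                         params={'learning_rate': 0.1})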
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
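  # A legacy-style model_fn that returns the (predictions, loss, train_op)
  # tuple is also accepted here and wrapped into a ModelFnOps above. Hedged
  # sketch (`build_graph` is a hypothetical helper, not part of this module):
  #
  #   def legacy_model_fn(features, labels, mode):
  #     predictions, loss, train_op = build_graph(features, labels, mode)
  #     return predictions, loss, train_op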
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by subclasses that require custom support.
    This implementation uses the `model_fn` passed to the constructor to
    build the model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by subclasses that require custom support.
    This implementation uses the `model_fn` passed to the constructor to
    build the model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
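  # The `metrics` dict described above usually maps friendly names to
  # MetricSpec objects. A hedged sketch, assuming MetricSpec is importable
  # from tensorflow.contrib.learn and that the model's predictions expose a
  # hypothetical 'classes' key; `eval_input_fn` is a placeholder:
  #
  #   from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
  #   metrics = {
  #       'accuracy': MetricSpec(metric_fn=metrics_lib.streaming_accuracy,
  #                              prediction_key='classes'),
  #   }
  #   estimator.evaluate(input_fn=eval_input_fn, metrics=metrics)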
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by subclasses that require custom support.
    This implementation uses the `model_fn` passed to the constructor to
    build the model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
      ValueError: if `serving_input_fn` is None.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
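  # Typical export usage, as a hedged sketch. It assumes
  # `import tensorflow as tf` and the contrib `input_fn_utils` module are
  # available on the caller's side; the 'x' feature and the export path are
  # hypothetical placeholders.
  #
  #   feature_spec = {'x': tf.FixedLenFeature([1], tf.float32)}
  #   serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(
  #       feature_spec)
  #   export_dir = estimator.export_savedmodel('/tmp/exports', serving_input_fn)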
# While x,y support in Estimator is being deprecated, allow SKCompat direct
# access to Estimator's protected members.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
      return np.concatenate(results, axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
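# SKCompat usage, as a hedged sketch: wrap an Estimator so plain numpy arrays
# can be passed in scikit-learn style. `my_model_fn`, `x_train`, `y_train`,
# `x_test` and `y_test` are hypothetical placeholders.
#
#   classifier = SKCompat(Estimator(model_fn=my_model_fn, model_dir='/tmp/m'))
#   classifier.fit(x_train, y_train, batch_size=128, steps=1000)
#   scores = classifier.score(x_test, y_test)
#   predictions = classifier.predict(x_test)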
| apache-2.0 |