# import ast
# import json
# import ruamel.yaml as ry
# from ruamel.yaml.comments import CommentedSeq
# from dolo.compiler.symbolic import check_expression
# from dolo.compiler.recipes import recipes
# from dolo.misc.termcolor import colored
# class Compare:
# def __init__(self):
# self.d = {}
# def compare(self, A, B):
# if isinstance(A, ast.Name) and (A.id[0] == '_'):
# if A.id not in self.d:
# self.d[A.id] = B
# return True
# else:
# return self.compare(self.d[A.id], B)
# if not (A.__class__ == B.__class__):
# return False
# if isinstance(A, ast.Name):
# return A.id == B.id
# elif isinstance(A, ast.Call):
# if not self.compare(A.func, B.func):
# return False
# if not len(A.args) == len(B.args):
# return False
# for i in range(len(A.args)):
# if not self.compare(A.args[i], B.args[i]):
# return False
# return True
# elif isinstance(A, ast.Num):
# return A.n == B.n
# elif isinstance(A, ast.Expr):
# return self.compare(A.value, B.value)
# elif isinstance(A, ast.Module):
# if not len(A.body) == len(B.body):
# return False
# for i in range(len(A.body)):
# if not self.compare(A.body[i], B.body[i]):
# return False
# return True
# elif isinstance(A, ast.BinOp):
# if not isinstance(A.op, B.op.__class__):
# return False
# if not self.compare(A.left, B.left):
# return False
# if not self.compare(A.right, B.right):
# return False
# return True
# elif isinstance(A, ast.UnaryOp):
# if not isinstance(A.op, B.op.__class__):
# return False
# return self.compare(A.operand, B.operand)
# elif isinstance(A, ast.Subscript):
# if not self.compare(A.value, B.value):
# return False
# return self.compare(A.slice, B.slice)
# elif isinstance(A, ast.Index):
# return self.compare(A.value, B.value)
# elif isinstance(A, ast.Compare):
# if not self.compare(A.left, B.left):
# return False
# if not len(A.ops) == len(B.ops):
# return False
# for i in range(len(A.ops)):
# if not self.compare(A.ops[i], B.ops[i]):
# return False
# if not len(A.comparators) == len(B.comparators):
# return False
# for i in range(len(A.comparators)):
# if not self.compare(A.comparators[i], B.comparators[i]):
# return False
# return True
# elif isinstance(A, ast.In):
# return True
# elif isinstance(A, (ast.Eq, ast.LtE)):
# return True
# else:
# print(A.__class__)
# raise Exception("Not implemented")
# def compare_strings(a, b):
# t1 = ast.parse(a)
# t2 = ast.parse(b)
# comp = Compare()
# val = comp.compare(t1, t2)
# return val
# def match(m, s):
# if isinstance(m, str):
# m = ast.parse(m).body[0].value
# if isinstance(s, str):
# s = ast.parse(s).body[0].value
# comp = Compare()
# val = comp.compare(m, s)
# d = comp.d
# if len(d) == 0:
# return val
# else:
# return d
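# # Usage sketch: names beginning with '_' act as wildcards that capture
# # the matching sub-expression (return values shown are illustrative):
# #
# #   compare_strings('a + b', 'a + b')   # -> True
# #   match('_x + 1', 'y + 1')            # -> {'_x': <ast.Name object for 'y'>}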
# known_symbol_types = {
# 'dtcc': recipes['dtcc']['symbols'],
# }
# class ModelException(Exception):
# type = 'error'
# def check_symbol_validity(s):
# import ast
# val = ast.parse(s).body[0].value
# assert (isinstance(val, ast.Name))
# def check_symbols(data):
# # can raise three types of exceptions
# # - unknown symbol
# # - invalid symbol
# # - already declared
# # add: not declared if missing 'states', 'controls' ?
# exceptions = []
# symbols = data['symbols']
# cm_symbols = symbols
# model_type = 'dtcc'
# already_declared = {} # symbol: symbol_type, position
# for key, values in cm_symbols.items():
# # (start_line, start_column, end_line, end_column) of the key
# if key not in known_symbol_types[model_type]:
# l0, c0, l1, c1 = cm_symbols.lc.data[key]
# exc = ModelException(
# "Unknown symbol type '{}'".format(
# key, model_type))
# exc.pos = (l0, c0, l1, c1)
# # print(l0,c0,l1,c1)
# exceptions.append(exc)
# assert (isinstance(values, CommentedSeq))
# for i, v in enumerate(values):
# (l0, c0) = values.lc.data[i]
# length = len(v)
# l1 = l0
# c1 = c0 + length
# try:
# check_symbol_validity(v)
# except Exception:
# exc = ModelException("Invalid symbol '{}'".format(v))
# exc.pos = (l0, c0, l1, c1)
# exceptions.append(exc)
# if v in already_declared:
# ll = already_declared[v]
# exc = ModelException(
# "Symbol '{}' already declared as '{}'. (pos {})".format(
# v, ll[0], (ll[1][0] + 1, ll[1][1])))
# exc.pos = (l0, c0, l1, c1)
# exceptions.append(exc)
# else:
# already_declared[v] = (key, (l0, c0))
# return exceptions
# def check_equations(data):
# model_type = data['model_type']
# pos0 = data.lc.data['equations']
# equations = data['equations']
# exceptions = []
# recipe = recipes[model_type]
# specs = recipe['specs']
# for eq_type in specs.keys():
# if (eq_type not in equations) and (not specs[eq_type].get(
# 'optional', True)):
# exc = ModelException("Missing equation type {}.".format(eq_type))
# exc.pos = pos0
# exceptions.append(exc)
# already_declared = {}
# unknown = []
# for eq_type in equations.keys():
# pos = equations.lc.data[eq_type]
# if eq_type not in specs:
# exc = ModelException("Unknown equation type {}.".format(eq_type))
# exc.pos = pos
# exceptions.append(exc)
# unknown.append(eq_type)
# # BUG: doesn't produce an error when a block is declared twice
# # should be raised by ruamel.yaml ?
# elif eq_type in already_declared.keys():
# exc = ModelException(
# "Equation type {} declared twice at ({})".format(eq_type, pos))
# exc.pos = pos
# exceptions.append(exc)
# else:
# already_declared[eq_type] = pos
# for eq_type in [k for k in equations.keys() if k not in unknown]:
# for n, eq in enumerate(equations[eq_type]):
# eq = eq.replace('<=', '<').replace('==', '=').replace('=', '==').replace('<', '<=')
# # print(eq)
# pos = equations[eq_type].lc.data[n]
# try:
# ast.parse(eq)
# except SyntaxError as e:
# exc = ModelException("Syntax Error.")
# exc.pos = [
# pos[0], pos[1] + e.offset, pos[0], pos[1] + e.offset
# ]
# exceptions.append(exc)
# # TEMP: incorrect ordering
# if specs[eq_type].get('target'):
# for n, eq in enumerate(equations[eq_type]):
# eq = eq.replace('<=', '<').replace('==', '=').replace('=', '==').replace('<', '<=')
# pos = equations[eq_type].lc.data[n]
# lhs_name = str.split(eq, '=')[0].strip()
# target = specs[eq_type]['target'][0]
# if lhs_name not in data['symbols'][target]:
# exc = ModelException(
# "Undeclared assignement target '{}'. Add it to '{}'.".
# format(lhs_name, target))
# exc.pos = [pos[0], pos[1], pos[0], pos[1] + len(lhs_name)]
# exceptions.append(exc)
# # if n>len(data['symbols'][target]):
# else:
# right_name = data['symbols'][target][n]
# if lhs_name != right_name:
# exc = ModelException(
# "Left hand side should be '{}' instead of '{}'.".
# format(right_name, lhs_name))
# exc.pos = [
# pos[0], pos[1], pos[0], pos[1] + len(lhs_name)
# ]
# exceptions.append(exc)
# # temp
# return exceptions
# def check_definitions(data):
# if 'definitions' not in data:
# return []
# definitions = data['definitions']
# if definitions is None:
# return []
# exceptions = []
# known_symbols = sum([[*v] for v in data['symbols'].values()], [])
# allowed_symbols = {v: (0, ) for v in known_symbols} # TEMP
# for p in data['symbols']['parameters']:
# allowed_symbols[p] = (0, )
# new_definitions = dict()
# for k, v in definitions.items():
# pos = definitions.lc.data[k]
# if k in known_symbols:
# exc = ModelException(
# 'Symbol {} has already been defined as a model symbol.'.format(
# k))
# exc.pos = pos
# exceptions.append(exc)
# continue
# if k in new_definitions:
# exc = ModelException(
# 'Symbol {} cannot be defined twice.'.format(k))
# exc.pos = pos
# exceptions.append(exc)
# continue
# try:
# check_symbol_validity(k)
# except Exception:
# exc = ModelException("Invalid symbol '{}'".format(k))
# exc.pos = pos
# exceptions.append(exc)
# # pos = equations[eq_type].lc.data[n]
# try:
# expr = ast.parse(str(v))
# # print(allowed_symbols)
# check = check_expression(expr, allowed_symbols)
# # print(check['problems'])
# for pb in check['problems']:
# name, t, offset, err_type = [pb[0], pb[1], pb[2], pb[3]]
# if err_type == 'timing_error':
# exc = Exception(
# 'Timing for variable {} could not be determined.'.
# format(pb[0]))
# elif err_type == 'incorrect_timing':
# exc = Exception(
# 'Variable {} cannot have time {}. (Allowed: {})'.
# format(name, t, pb[4]))
# elif err_type == 'unknown_function':
# exc = Exception(
# 'Unknown variable/function {}.'.format(name))
# elif err_type == 'unknown_variable':
# exc = Exception(
# 'Unknown variable/parameter {}.'.format(name))
# else:
# exc = Exception('Unknown problem type: {}.'.format(err_type))
# exc.pos = (pos[0], pos[1] + offset, pos[0],
# pos[1] + offset + len(name))
# exc.type = 'error'
# exceptions.append(exc)
# new_definitions[k] = v
# allowed_symbols[k] = (0, ) # TEMP
# # allowed_symbols[k] = None
# except SyntaxError as e:
# pp = pos # TODO: find right mark for pp
# exc = ModelException("Syntax Error.")
# exc.pos = [pp[0], pp[1] + e.offset, pp[0], pp[1] + e.offset]
# exceptions.append(exc)
# return exceptions
# def check_calibration(data):
# # what happens here if symbols are not clean ?
# symbols = data['symbols']
# pos0 = data.lc.data['calibration']
# calibration = data['calibration']
# exceptions = []
# all_symbols = []
# for v in symbols.values():
# all_symbols += v
# for s in all_symbols:
# if (s not in calibration.keys()) and (s not in symbols["exogenous"]):
# # should skip invalid symbols there
# exc = ModelException(
# "Symbol {} has no calibrated value.".format(s))
# exc.pos = pos0
# exc.type = 'warning'
# exceptions.append(exc)
# for s in calibration.keys():
# val = str(calibration[s])
# try:
# ast.parse(val)
# except SyntaxError as e:
# pos = calibration.lc.data[s]
# exc = ModelException("Syntax Error.")
# exc.pos = [pos[0], pos[1] + e.offset, pos[0], pos[1] + e.offset]
# exceptions.append(exc)
# return exceptions
# def check_all(data):
# def serious(exsc):
# return ('error' in [e.type for e in exsc])
# exceptions = check_infos(data)
# if serious(exceptions):
# return exceptions
# exceptions = check_symbols(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_definitions(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_equations(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_calibration(data)
# if serious(exceptions):
# return exceptions
# return exceptions
# def human_format(err):
# err_type = err['type']
# err_type = colored(
# err_type, color=('red' if err_type == 'error' else 'yellow'))
# err_range = str([e + 1 for e in err['range'][0]])[1:-1]
# return '{:7}: {:6}: {}'.format(err_type, err_range, err['text'])
# def check_infos(data):
# exceptions = []
# if 'model_type' in data:
# model_type = data['model_type']
# if model_type not in ['dtcc', 'dtmscc', 'dtcscc', 'dynare']:
# exc = ModelException('Unknown model type: {}.'.format(
# str(model_type)))
# exc.pos = data.lc.data['model_type']
# exc.type = 'error'
# exceptions.append(exc)
# else:
# model_type = 'dtcc'
# data['model_type'] = 'dtcc'
# # exc = ModelException("Missing field: 'model_type'.")
# # exc.pos = (0,0,0,0)
# # exc.type='error'
# # exceptions.append(exc)
# if 'name' not in data:
# exc = ModelException("Missing field: 'name'.")
# exc.pos = (0, 0, 0, 0)
# exc.type = 'warning'
# exceptions.append(exc)
# return exceptions
# def lint(txt, source='<string>', format='human', catch_exception=False):
# # raise ModelException if it doesn't work correctly
# if isinstance(txt, str):
# try:
# data = ry.load(txt, ry.RoundTripLoader)
# except Exception as exc:
# if not catch_exception:
# raise exc
# return [] # should return parse error
# else:
# # txt is then assumed to be a ruamel structure
# data = txt
# if not ('symbols' in data or 'equations' in data or 'calibration' in data):
# # this is probably not a yaml filename
# output = []
# else:
# try:
# exceptions = check_all(data)
# except Exception as e:
# if not catch_exception:
# raise(e)
# exc = ModelException("Linter Error: Uncaught Exception.")
# exc.pos = [0, 0, 0, 0]
# exc.type = 'error'
# exceptions = [exc]
# output = []
# for k in exceptions:
# try:
# err_type = k.type
# except AttributeError:
# err_type = 'error'
# output.append({
# 'type':
# err_type,
# 'source':
# source,
# 'range': ((k.pos[0], k.pos[1]), (k.pos[2], k.pos[3])),
# 'text':
# k.args[0]
# })
# if format == 'json':
# return (json.dumps(output))
# elif format == 'human':
# return (str.join("\n", [human_format(e) for e in output]))
# elif not format:
# return output
# else:
# raise ModelException("Unkown format {}.".format(format))
# TODO:
# - check name (already defined by somebody else?)
# - description: ?
# - calibration:
# - incorrect key
# - warning if not a known symbol ?
# - not a recognized identifier
# - defined twice
# - impossible to solve in closed form (depends on ...)
# - incorrect equation
# - grammatically incorrect
# - contains timed variables
# - warnings:
# - missing values
# - equations: symbols already known (beware of speed issues)
# - unknown group of equations
# - incorrect syntax
# - undeclared variable (and not a function)
# - indexed parameter
# - incorrect order
# - incorrect complementarities
# - incorrect recipe: unexpected symbol type
# - nonzero residuals (warning, to be done without compiling)
# - options: if present
# - approximation_space:
# - inconsistent boundaries
# - must equal number of states
# - distribution:
# - same size as shocks
"""
=====================================
Structured Arrays (and Record Arrays)
=====================================
Introduction
============
Numpy provides powerful capabilities to create arrays of structs or records.
These arrays permit one to manipulate the data by the structs or by fields of
the struct. A simple example will show what is meant: ::
>>> x = np.zeros((2,),dtype=('i4,f4,a10'))
>>> x[:] = [(1,2.,'Hello'),(2,3.,"World")]
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a record that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second record: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. In this case the fields have received the
default names 'f0', 'f1' and 'f2'. ::
>>> y = x['f1']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the record. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the record, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument (as used in the above examples).
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g. H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
Using strings to define the record structure precludes naming the
fields in the original definition. The names can be changed later,
however, as shown below.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a list matching the required
two in length, where 'offsets' contains integer byte offsets for each
field, and 'titles' contains metadata objects for each field (these do
not have to be strings); the value None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the record structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
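A scalar can also be assigned to a field; it is broadcast across every
row (a small illustrative addition): ::
>>> arr['var2'] = 100.0
>>> arr['var2']
array([ 100.,  100.,  100.,  100.,  100.])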
More information
====================================
You can find some more information on recarrays and structured arrays
(including the difference between the two) `here
<http://www.scipy.org/Cookbook/Recarray>`_.
""" |
"""
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaNs with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
numpy.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze Return an array with length-one dimensions removed.
atleast_1d Force arrays to be at least 1D
atleast_2d Force arrays to be at least 2D
atleast_3d Force arrays to be at least 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
1D Array Set Operations
-----------------------
Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
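A short doctest-style illustration of the set routines (assumes
``numpy`` is imported as ``np``): ::
>>> np.intersect1d([1, 2, 3], [2, 3, 4])
array([2, 3])
>>> np.union1d([1, 2], [2, 3])
array([1, 2, 3])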
""" |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
###########################################################################################
# Implementation of the stochastic depth algorithm described in the paper
#
# NAME et al. "Deep networks with stochastic depth." arXiv preprint arXiv:1603.09382 (2016).
#
# Reference torch implementation can be found at https://github.com/yueatsprograms/Stochastic_Depth
#
# There are some differences in the implementation:
# - A BN->ReLU->Conv is used for the skip connection when input and output shapes are different,
# as opposed to a padding layer.
# - The residual block is different: we use BN->ReLU->Conv->BN->ReLU->Conv, as opposed to
# Conv->BN->ReLU->Conv->BN (->ReLU also applied to skip connection).
# - We did not try to match with the same initialization, learning rate scheduling, etc.
#
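# A minimal sketch of the pre-activation residual unit described above
# (BN->ReLU->Conv->BN->ReLU->Conv). Layer names and hyper-parameters are
# illustrative assumptions, not taken from the reference implementation:
#
#   import mxnet as mx
#
#   def residual_unit(data, num_filter):
#       bn1 = mx.sym.BatchNorm(data=data)
#       act1 = mx.sym.Activation(data=bn1, act_type='relu')
#       conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter,
#                                  kernel=(3, 3), stride=(1, 1), pad=(1, 1))
#       bn2 = mx.sym.BatchNorm(data=conv1)
#       act2 = mx.sym.Activation(data=bn2, act_type='relu')
#       conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter,
#                                  kernel=(3, 3), stride=(1, 1), pad=(1, 1))
#       return conv2 + data  # identity skip connection; shapes assumed to match
#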
#--------------------------------------------------------------------------------
# A sample from the running log (we achieved ~9.4% error after 500 epochs; some
# more careful tuning of the hyper-parameters, and maybe also of the architecture,
# is needed to achieve the numbers reported in the paper):
#
# INFO:root:Epoch[80] Batch [50] Speed: 1020.95 samples/sec Train-accuracy=0.910080
# INFO:root:Epoch[80] Batch [100] Speed: 1013.41 samples/sec Train-accuracy=0.912031
# INFO:root:Epoch[80] Batch [150] Speed: 1035.48 samples/sec Train-accuracy=0.913438
# INFO:root:Epoch[80] Batch [200] Speed: 1045.00 samples/sec Train-accuracy=0.907344
# INFO:root:Epoch[80] Batch [250] Speed: 1055.32 samples/sec Train-accuracy=0.905937
# INFO:root:Epoch[80] Batch [300] Speed: 1071.71 samples/sec Train-accuracy=0.912500
# INFO:root:Epoch[80] Batch [350] Speed: 1033.73 samples/sec Train-accuracy=0.910937
# INFO:root:Epoch[80] Train-accuracy=0.919922
# INFO:root:Epoch[80] Time cost=48.348
# INFO:root:Saved checkpoint to "sd-110-0081.params"
# INFO:root:Epoch[80] Validation-accuracy=0.880142
# ...
# INFO:root:Epoch[115] Batch [50] Speed: 1037.04 samples/sec Train-accuracy=0.937040
# INFO:root:Epoch[115] Batch [100] Speed: 1041.12 samples/sec Train-accuracy=0.934219
# INFO:root:Epoch[115] Batch [150] Speed: 1036.02 samples/sec Train-accuracy=0.933125
# INFO:root:Epoch[115] Batch [200] Speed: 1057.49 samples/sec Train-accuracy=0.938125
# INFO:root:Epoch[115] Batch [250] Speed: 1060.56 samples/sec Train-accuracy=0.933438
# INFO:root:Epoch[115] Batch [300] Speed: 1046.25 samples/sec Train-accuracy=0.935625
# INFO:root:Epoch[115] Batch [350] Speed: 1043.83 samples/sec Train-accuracy=0.927188
# INFO:root:Epoch[115] Train-accuracy=0.938477
# INFO:root:Epoch[115] Time cost=47.815
# INFO:root:Saved checkpoint to "sd-110-0116.params"
# INFO:root:Epoch[115] Validation-accuracy=0.884415
# ...
# INFO:root:Saved checkpoint to "sd-110-0499.params"
# INFO:root:Epoch[498] Validation-accuracy=0.908554
# INFO:root:Epoch[499] Batch [50] Speed: 1068.28 samples/sec Train-accuracy=0.991422
# INFO:root:Epoch[499] Batch [100] Speed: 1053.10 samples/sec Train-accuracy=0.991094
# INFO:root:Epoch[499] Batch [150] Speed: 1042.89 samples/sec Train-accuracy=0.995156
# INFO:root:Epoch[499] Batch [200] Speed: 1066.22 samples/sec Train-accuracy=0.991406
# INFO:root:Epoch[499] Batch [250] Speed: 1050.56 samples/sec Train-accuracy=0.990781
# INFO:root:Epoch[499] Batch [300] Speed: 1032.02 samples/sec Train-accuracy=0.992500
# INFO:root:Epoch[499] Batch [350] Speed: 1062.16 samples/sec Train-accuracy=0.992969
# INFO:root:Epoch[499] Train-accuracy=0.994141
# INFO:root:Epoch[499] Time cost=47.401
# INFO:root:Saved checkpoint to "sd-110-0500.params"
# INFO:root:Epoch[499] Validation-accuracy=0.906050
# ###########################################################################################
"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
============ =================================================================
``pi`` Pi
``golden`` Golden ratio
============ =================================================================
Physical constants
==================
============= =================================================================
``c`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``R`` molar gas constant
``alpha`` fine-structure constant
``N_A`` Avogadro constant
``k`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``m_p`` proton mass
``m_n`` neutron mass
============= =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2010 CODATA recommended values [CODATA2010]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
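A short usage sketch (the exact figures depend on the CODATA release in
use): ::
>>> from scipy.constants import value, unit
>>> unit('elementary charge')
'C'
>>> value('elementary charge')   # doctest: +SKIP
1.602176565e-19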
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb`` one pound (avoirdupois) in kg
``oz`` one ounce in kg
``stone`` one stone in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcsec`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
================= ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
================= ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================= ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================= ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
C2K
K2C
F2C
C2F
F2K
K2F
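For example (a brief illustration of these conversion helpers): ::
>>> from scipy.constants import C2K, F2C
>>> C2K(25.0)
298.15
>>> F2C(212.0)
100.0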
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``lbf`` one pound force in newtons
``kgf`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
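For example, converting a wavelength to the equivalent frequency
(:math:`\nu = c/\lambda`): ::
>>> from scipy.constants import lambda2nu, c
>>> lambda2nu(c)   # a wavelength numerically equal to c gives 1 Hz
1.0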
References
==========
.. [CODATA2010] CODATA Recommended Values of the Fundamental
Physical Constants 2010.
http://physics.nist.gov/cuu/Constants/index.html
""" |
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
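A minimal sketch of a stream service (the address, port, and handler
name are illustrative):
class EchoHandler(StreamRequestHandler):
    def handle(self):
        # Read one line from the client and echo it back.
        data = self.rfile.readline()
        self.wfile.write(data)
server = TCPServer(('localhost', 9999), EchoHandler)
server.serve_forever()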
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that come in nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use a selector to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 NAME <EMAIL>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
""" |
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also an "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in the perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
"""
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a
``ValueError: frames are not aligned`` exception is thrown, indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
When either of the dimensions compared is one, the larger of the two is
used. In other words, the smaller of two axes is stretched or "copied"
to match the other.
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
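A quick doctest-style check of this example: ::
>>> A = np.ones((8, 1, 6, 1))
>>> B = np.ones((7, 1, 5))
>>> (A + B).shape
(8, 7, 6, 5)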
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://www.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact EMAIL Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special exceptions and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact EMAIL with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, we are happy to help. As mentioned above, we also *
# * offer an alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email EMAIL for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the EMAIL mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************
|
"""
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
The `*gegv` family of routines was removed in LAPACK 3.6.0 and
deprecated in SciPy 0.17.0; these routines will be removed from
SciPy in a future release.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines allows the
input arrays to be overwritten to avoid extra memory allocation.
However, this requires that the array satisfy two conditions: its
memory order and its data type must exactly match the order and
the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will create a
FORTRAN-contiguous intermediate copy internally. Please make sure
that these details are satisfied. More information can be found in
the f2py documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgegv
dgegv
cgegv
zgegv
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesvd
dgesvd
cgesvd
zgesvd
sgesvd_lwork
dgesvd_lwork
cgesvd_lwork
zgesvd_lwork
sgesv
dgesv
cgesv
zgesv
sgesvx
dgesvx
cgesvx
zgesvx
sgecon
dgecon
cgecon
zgecon
ssysv
dsysv
csysv
zsysv
ssysv_lwork
dsysv_lwork
csysv_lwork
zsysv_lwork
ssysvx
dsysvx
csysvx
zsysvx
ssysvx_lwork
dsysvx_lwork
csysvx_lwork
zsysvx_lwork
ssygst
dsygst
ssytrd
dsytrd
ssytrd_lwork
dsytrd_lwork
chetrd
zhetrd
chetrd_lwork
zhetrd_lwork
chesv
zhesv
chesv_lwork
zhesv_lwork
chesvx
zhesvx
chesvx_lwork
zhesvx_lwork
chegst
zhegst
sgetrf
dgetrf
cgetrf
zgetrf
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
chbevd
zhbevd
chbevx
zhbevx
cheev
zheev
cheevd
zheevd
cheevr
zheevr
chegv
zhegv
chegvd
zhegvd
chegvx
zhegvx
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
sposv
dposv
cposv
zposv
sposvx
dposvx
cposvx
zposvx
spocon
dpocon
cpocon
zpocon
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
crot
zrot
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
cunghr
zunghr
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgtsv
dgtsv
cgtsv
zgtsv
sptsv
dptsv
cptsv
zptsv
slamch
dlamch
sorghr
dorghr
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
sstebz
dstebz
sstemr
dstemr
ssterf
dsterf
sstein
dstein
sstev
dstev
ssyev
dsyev
ssyevd
dsyevd
ssyevr
dsyevr
ssygv
dsygv
ssygvd
dsygvd
ssygvx
dsygvx
slange
dlange
clange
zlange
ilaver
""" |
"""
===============
Array Internals
===============
Internal organization of numpy arrays
=====================================
It helps to understand a bit about how numpy arrays are handled under the covers in order to understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy".
NumPy arrays consist of two major components, the raw array data (from now on,
referred to as the data buffer), and the information about the raw array data.
The data buffer is typically what people think of as arrays in C or Fortran,
a contiguous (and fixed) block of memory containing fixed sized data items.
NumPy also contains a significant set of data that describes how to interpret
the data in the data buffer. This extra information contains (among other things):
1) The basic data element's size in bytes
2) The start of the data within the data buffer (an offset relative to the
beginning of the data buffer).
3) The number of dimensions and the size of each dimension
4) The separation between elements for each dimension (the 'stride'). This
does not have to be a multiple of the element size
5) The byte order of the data (which may not be the native byte order)
6) Whether the buffer is read-only
7) Information (via the dtype object) about the interpretation of the basic
data element. The basic data element may be as simple as an int or a float,
or it may be a compound object (e.g., struct-like), a fixed character field,
or Python object pointers.
8) Whether the array is to be interpreted as C-order or Fortran-order.
This arrangement allows for very flexible use of arrays. One thing that it
allows is simple changes of the metadata to change the interpretation of the
array buffer. Changing the byteorder of the array is a simple change involving
no rearrangement of the data. The shape of the array can be changed very easily
without changing anything in the data buffer or any data copying at all.
Among other things, this makes it possible to create a new array metadata
object that uses the same data buffer: a new view of that data buffer with a
different interpretation of the buffer (e.g., different shape, offset, byte
order, strides, etc.) but sharing the same data bytes. Many operations in
numpy do just this, such as slicing. Other operations, such as transpose,
don't move data elements around in the array, but rather change the
information about the shape and strides so that the indexing of the array
changes, but the data in the buffer doesn't move.
Typically these new versions of the array metadata, with the same data buffer, are
new 'views' into the data buffer. There is a different ndarray object, but it
uses the same data buffer. This is why it is necessary to force copies through
use of the .copy() method if one really wants to make a new and independent
copy of the data buffer.
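A small illustration:

    >>> import numpy as np
    >>> a = np.arange(6)
    >>> v = a[::2]        # a view: new metadata, same data buffer
    >>> v[0] = 99         # the change is visible through `a` as well
    >>> c = a.copy()      # an independent buffer; changing c leaves a alone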
New views into arrays mean the object reference counts for the data buffer
increase. Simply doing away with the original array object will not remove the
data buffer if other views of it still exist.
Multidimensional Array Indexing Order Issues
============================================
What is the right way to index
multi-dimensional arrays? Before you jump to conclusions about the one and
true way to index multi-dimensional arrays, it pays to understand why this is
a confusing issue. This section will try to explain in detail how numpy
indexing works and why we adopt the convention we do for images, and when it
may be appropriate to adopt other conventions.
The first thing to understand is
that there are two conflicting conventions for indexing 2-dimensional arrays.
Matrix notation uses the first index to indicate which row is being selected and
the second index to indicate which column is selected. This is opposite the
geometrically-oriented convention for images where people generally think the
first index represents x position (i.e., column) and the second represents y
position (i.e., row). This alone is the source of much confusion;
matrix-oriented users and image-oriented users expect two different things with
regard to indexing.
The second issue to understand is how indices correspond
to the order the array is stored in memory. In Fortran the first index is the
most rapidly varying index when moving through the elements of a two
dimensional array as it is stored in memory. If you adopt the matrix
convention for indexing, then this means the matrix is stored one column at a
time (since the first index moves to the next row as it changes). Thus Fortran
is considered a Column-major language. C has just the opposite convention. In
C, the last index changes most rapidly as one moves through the array as
stored in memory. Thus C is a Row-major language. The matrix is stored by
rows. Note that in both cases it presumes that the matrix convention for
indexing is being used, i.e., for both Fortran and C, the first index is the
row. Note this convention implies that the indexing convention is invariant
and that the data order changes to keep that so.
But that's not the only way
to look at it. Suppose one has large two-dimensional arrays (images or
matrices) stored in data files. Suppose the data are stored by rows rather than
by columns. If we are to preserve our index convention (whether matrix or
image) that means that depending on the language we use, we may be forced to
reorder the data if it is read into memory to preserve our indexing
convention. For example if we read row-ordered data into memory without
reordering, it will match the matrix indexing convention for C, but not for
Fortran. Conversely, it will match the image indexing convention for Fortran,
but not for C. For C, if one is using data stored in row order, and one wants
to preserve the image index convention, the data must be reordered when
reading into memory.
In the end, what you do for Fortran or C depends on
which is more important: not reordering data, or preserving the indexing
convention. For large images, reordering data is potentially expensive, and
often the indexing convention is inverted to avoid that.
The situation with
numpy makes this issue yet more complicated. The internal machinery of numpy
arrays is flexible enough to accept any ordering of indices. One can simply
reorder indices by manipulating the internal stride information for arrays
without reordering the data at all. NumPy will know how to map the new index
order to the data without moving the data.
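For example, transposing swaps only the stride information; the data buffer
itself is untouched:

    >>> import numpy as np
    >>> a = np.zeros((3, 4))   # C order, 8-byte floats
    >>> a.strides
    (32, 8)
    >>> a.T.strides            # same buffer, reinterpreted
    (8, 32)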
So if this is true, why not choose
the index order that matches what you most expect? In particular, why not define
row-ordered images to use the image convention? (This is sometimes referred
to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
order options for array ordering in numpy.) The drawback of doing this is
potential performance penalties. It's common to access the data sequentially,
either implicitly in array operations or explicitly by looping over rows of an
image. When that is done, then the data will be accessed in non-optimal order.
As the first index is incremented, what is actually happening is that elements
spaced far apart in memory are being sequentially accessed, with usually poor
memory access speeds. For example, consider a two-dimensional image 'im'
defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
with usual Python behavior, im[0] would then represent a column at x=0. Yet that data
would be spread over the whole array since the data are stored in row order.
Despite the flexibility of numpy's indexing, it can't really paper over the
fact that basic operations are rendered inefficient because of data order, or
that getting contiguous subarrays is still awkward (e.g., im[:,0] for the
first row, vs im[0]). Thus one can't use an idiom such as ``for row in im``;
``for col in im`` does work, but doesn't yield contiguous column data.
As it turns out, numpy is
smart enough when dealing with ufuncs to determine which index is the most
rapidly varying one in memory and uses that for the innermost loop. Thus for
ufuncs there is no large intrinsic advantage to either approach in most cases.
On the other hand, use of .flat with a FORTRAN-ordered array will lead to
non-optimal memory access as adjacent elements in the flattened array (iterator,
actually) are not contiguous in memory.
Indeed, the fact is that Python
indexing on lists and other sequences naturally leads to an outside-to-inside
ordering (the first index gets the largest grouping, the next the next largest,
and the last gets the smallest element). Since image data are normally stored
by rows, this corresponds to position within rows being the last item indexed.
If you do want to use Fortran ordering, realize that
there are two approaches to consider: 1) accept that the first index is just not
the most rapidly changing in memory and have all your I/O routines reorder
your data when going from memory to disk or vice versa, or 2) use numpy's
mechanism for mapping the first index to the most rapidly varying data. We
recommend the former if possible. The disadvantage of the latter is that many
of numpy's functions will yield arrays without Fortran ordering unless you are
careful to use the 'order' keyword. Doing this would be highly inconvenient.
Otherwise we recommend simply learning to reverse the usual order of indices
when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
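Example (a minimal usage sketch):
    import configparser
    parser = configparser.ConfigParser()
    parser.read_string("[server]\nport = 8080\n")
    parser.get('server', 'port')     # -> '8080'
    parser.getint('server', 'port')  # -> 8080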
""" |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# NAME <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
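#
# A hedged sketch of reading these two structures with the struct module
# (the helper name is illustrative):
#
#   import struct
#
#   def read_sfnt_tables(data):
#       # SFNT header: sfnt version (Fixed) and numTables, big-endian
#       version, num_tables = struct.unpack('>IH', data[:6])
#       tables = {}
#       for i in range(num_tables):
#           off = 12 + 16 * i   # 12-byte header, 16 bytes per record
#           tag, checksum, offset, length = struct.unpack('>4sIII', data[off:off + 16])
#           tables[tag] = (offset, length)
#       return tables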
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum (see the sketch below this table).
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
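#
# A hedged sketch of the checkSumAdjustment recipe above (the helper name
# is illustrative; it assumes the font data is zero-padded to a multiple of
# 4 bytes and that checkSumAdjustment itself has already been zeroed):
#
#   import struct
#
#   def checksum_adjustment(font_bytes):
#       total = 0
#       for (v,) in struct.iter_unpack('>I', font_bytes):
#           total = (total + v) & 0xFFFFFFFF   # sum the font as ULONGs
#       return (0xB1B0AFBA - total) & 0xFFFFFFFF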
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# BYTE VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
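#
# A hedged sketch of building one such name record (the helper name is
# illustrative; per the note above, the EOT header is little-endian and
# names are UTF-16):
#
#   import struct
#
#   def eot_name_record(name):
#       data = name.encode('utf-16-le')
#       # USHORT size, the string bytes, then the USHORT zero padding field
#       return struct.pack('<H', len(data)) + data + struct.pack('<H', 0)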
|
# DEPRECATED
# import json
# import time
# import multiprocessing
# from .generic_kafka_processor import GenericKafkaProcessor
# from ..imgio.imgio import buffer_to_B64
#
# default_prefix = "KIP_"
# default_prefix_frompkl = "KIPFP_"
#
# # TODO: This class should be rewritten to actually extract features from images...
# # TODO: Work on getting a pycaffe sentibank featurizer. Check that we get the same feature values as the command line in 'sentibank_cmdline'
# # at 'https://github.com/ColumbiaDVMM/ColumbiaImageSearch/blob/master/cu_image_search/feature_extractor/sentibank_cmdline.py'
# # Should we have a generic extractor to inherit from, with just a different process_one_core() method?...
#
# class KafkaImageProcessor(GenericKafkaProcessor):
#
# def __init__(self, global_conf_filename, prefix=default_prefix, pid=None):
# # when running as daemon
# self.pid = pid
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessor, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.cdr_out_topic = self.get_required_param('producer_cdr_out_topic')
# self.images_out_topic = self.get_required_param('producer_images_out_topic')
# # TODO: get s3 url prefix from actual location
# # for now "object_stored_prefix" in "_meta" of domain CDR
# # but just get from conf
# self.url_prefix = self.get_required_param('obj_stored_prefix')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessor"
# if self.pid:
# self.pp += ":"+str(self.pid)
#
#
#
# def process_one(self, msg):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# self.print_stats(msg)
#
# msg_value = json.loads(msg.value)
#
# # From msg value get list_urls for image objects only
# list_urls = self.get_images_urls(msg_value)
#
# # Get images data and infos
# dict_imgs = dict()
# for url, obj_pos in list_urls:
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print(print_msg.format(self.pp, url))
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'obj_pos': obj_pos, 'img_buffer': img_buffer, 'sha1': sha1, 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print(print_msg.format(self.pp, url))
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print(print_msg.format(self.pp, url, inst))
#
# # Push to cdr_out_topic
# self.producer.send(self.cdr_out_topic, self.build_cdr_msg(msg_value, dict_imgs))
#
# # TODO: we could have all extraction registered here, and not pushing an image if it has been processed by all extractions. But that violates the consumer design of Kafka...
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
#
# class KafkaImageProcessorFromPkl(GenericKafkaProcessor):
# # To push list of images to be processed from a pickle file containing a dictionary
# # {'update_ids': update['update_ids'], 'update_images': out_update_images}
# # with 'out_update_images' being a list of tuples (sha1, url)
#
# def __init__(self, global_conf_filename, prefix=default_prefix_frompkl):
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessorFromPkl, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.images_out_topic = self.get_required_param('images_out_topic')
# self.pkl_path = self.get_required_param('pkl_path')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.display_count = 100
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessorFromPkl"
#
# def get_next_img(self):
# import pickle
# with open(self.pkl_path, 'rb') as pkl_file:
# update = pickle.load(pkl_file)
# for sha1, url in update['update_images']:
# yield sha1, url
#
# def build_image_msg(self, dict_imgs):
# # Build dict output for each image with fields 's3_url', 'sha1', 'img_info' and 'img_buffer'
# img_out_msgs = []
# for url in dict_imgs:
# tmp_dict_out = dict()
# tmp_dict_out['s3_url'] = url
# tmp_dict_out['sha1'] = dict_imgs[url]['sha1']
# tmp_dict_out['img_info'] = dict_imgs[url]['img_info']
# # encode buffer in B64?
# tmp_dict_out['img_buffer'] = buffer_to_B64(dict_imgs[url]['img_buffer'])
# img_out_msgs.append(json.dumps(tmp_dict_out).encode('utf-8'))
# return img_out_msgs
#
# def process(self):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# # Get images data and infos
# for sha1, url in self.get_next_img():
#
# if (self.process_count + self.process_failed) % self.display_count == 0:
# avg_process_time = self.process_time / max(1, self.process_count + self.process_failed)
# print_msg = "[%s] dl count: %d, failed: %d, time: %f"
# print(print_msg % (self.pp, self.process_count, self.process_failed, avg_process_time))
#
# dict_imgs = dict()
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print(print_msg.format(self.pp, url))
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'img_buffer': img_buffer, 'sha1': sha1,
# 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print(print_msg.format(self.pp, url))
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print(print_msg.format(self.pp, url, inst))
#
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
# class DaemonKafkaImageProcessor(multiprocessing.Process):
#
# daemon = True
#
# def __init__(self, conf, prefix=default_prefix):
# super(DaemonKafkaImageProcessor, self).__init__()
# self.conf = conf
# self.prefix = prefix
#
# def run(self):
# try:
# print "Starting worker KafkaImageProcessor.{}".format(self.pid)
# kp = KafkaImageProcessor(self.conf, prefix=self.prefix, pid=self.pid)
# for msg in kp.consumer:
# kp.process_one(msg)
# except Exception as inst:
|
"""
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
MATLAB |reg| [*]_ analogs and similar arguments.
.. |reg| unicode:: 0xAE
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
autoscale - turn axis autoscaling on or off, and apply it
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
violinplot - make a violin plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interactive mode off
ion - turn interactive mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imsave - save array as an image file
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
locator_params - adjust parameters used in locating axis ticks
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
margins - set margins used in autoscaling
pause - pause for a specified interval
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make one subplot (numrows, numcols, axesnum)
subplots - make a figure with a set of (numrows, numcols) subplots
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
tick_params - control the appearance of ticks and tick labels
ticklabel_format - control the format of tick labels
title - add a title to the current axes
tricontour - make a contour plot on a triangular grid
tricontourf - make a filled contour plot on a triangular grid
tripcolor - make a pseudocolor plot on a triangular grid
triplot - plot a triangular grid
xcorr - plot the cross-correlation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
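For example, a minimal sketch of this procedural style (using the pyplot
interface):

  import matplotlib.pyplot as plt

  plt.plot([1, 2, 3], [1, 4, 9])  # make a line plot
  plt.xlabel('x')                 # label the x axis
  plt.title('a quick line plot')  # add a title
  plt.show()                      # show the figure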
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the columns of a matrix left/right
flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
amax - the maximum along dimension m
amin - the minimum along dimension m
corrcoef - correlation coefficient
cov - covariance matrix
mean - the mean along dimension m
median - the median along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
ksdensity - the kernel density estimate
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - Deprecated--please use loadtxt.
loadtxt - load ASCII data into array.
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - Deprecated--please use savetxt.
savetxt - save an array to an ASCII file.
trapz - trapezoidal integration
__end
.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
""" |
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. For example: ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
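For example, a minimal sketch ('data.bin' is a placeholder path; note that
fromfile() does not store the dtype or byte order, so you must supply them): ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('data.bin')
>>> b = np.fromfile('data.bin', dtype=np.float64)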
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common use is the
many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
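For example: ::
>>> np.random.random((2, 2))  # uniform random values; results vary per run
>>> np.diag([1, 2, 3])        # 3x3 matrix with 1, 2, 3 on the diagonal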
""" |
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'
# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True
# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (IP_ADDRESS) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['IP_ADDRESS:11211', 'IP_ADDRESS:11211', 'IP_ADDRESS:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'
# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to set up an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["IP_ADDRESS:80", "IP_ADDRESS:80"]
## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use IP_ADDRESS here in most cases
#CARBONLINK_HOSTS = ["IP_ADDRESS:7002:a", "IP_ADDRESS:7102:b", "IP_ADDRESS:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
|
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
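For example, a minimal reading sketch (assuming an existing file named
'sound.aiff'; the file name is a placeholder):
import aifc
f = aifc.open('sound.aiff', 'r')
nchannels = f.getnchannels()         # 1 for mono, 2 for stereo
data = f.readframes(f.getnframes())  # all audio frames as raw bytes
f.close()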
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, including the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
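For example, a minimal writing sketch (one second of silence; the file
name is a placeholder):
import aifc
f = aifc.open('silence.aiff', 'w')   # '.aiff' extension selects AIFF output
f.setnchannels(1)
f.setsampwidth(2)                    # 16-bit samples
f.setframerate(44100)
f.writeframes(b'\x00\x00' * 44100)   # writeframes() patches up the header
f.close()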
""" |
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: NAME NAME
#
# A simplified model of a complete HMC device. Based on:
# [1] http://www.hybridmemorycube.org/specification-download/
# [2] High performance AXI-4.0 based interconnect for extensible smart memory
# cubes (E. Azarkhish et al.)
# [3] Low-Power Hybrid Memory Cubes With Link Power Management and Two-Level
# Prefetching (J. Ahn et al.)
# [4] Memory-centric system interconnect design with Hybrid Memory Cubes
# (G. NAME et al.)
# [5] Near Data Processing, Are we there yet? (M. Gokhale)
# http://www.cs.utah.edu/wondp/gokhale.pdf
# [6] openHMC - A Configurable Open-Source Hybrid Memory Cube Controller
# (J. NAME)
# [7] Hybrid Memory Cube performance characterization on data-centric
# workloads (M. Gokhale)
#
# This script builds a complete HMC device composed of vault controllers,
# serial links, the main internal crossbar, and an external HMC controller.
#
# - VAULT CONTROLLERS:
# Instances of the HMC_2500_1x32 class with their functionality specified in
# dram_ctrl.cc
#
# - THE MAIN XBAR:
# This component is simply an instance of the NoncoherentXBar class, and its
# parameters are tuned to [2].
#
# - SERIAL LINKS CONTROLLER:
# SerialLink is a simple variation of the Bridge class, with the ability to
# account for the latency of packet serialization and controller latency. We
# assume that the serializer component at the transmitter side does not need
# to receive the whole packet to start the serialization. But the
# deserializer waits for the complete packet to check its integrity first.
#
# * Bandwidth of the serial links is not modeled in the SerialLink component
# itself.
#
# * The latency of the serial link controller is composed of the SerDes
#   latency plus the link controller latency.
#
# * It is inferred from the standard [1] and the literature [3] that serial
# links share the same address range and packets can travel over any of
# them, so a load distribution mechanism is required among them.
#
#  -----------------------------------------
# |           Host/HMC Controller           |
# |     ----------------------              |
# |    |  Link Aggregator     |  opt        |
# |     ----------------------              |
# |     ----------------------              |
# |    |  Serial Link + Ser   |  * 4        |
# |     ----------------------              |
#  -----------------------------------------
#  -----------------------------------------
# |                 Device                  |
# |     ----------------------              |
# |    |        Xbar          |  * 4        |
# |     ----------------------              |
# |     ----------------------              |
# |    |   Vault Controller   |  * 16       |
# |     ----------------------              |
# |     ----------------------              |
# |    |        Memory        |             |
# |     ----------------------              |
#  -----------------------------------------
#
# In this version we present 3 different HMC architectures, along with
# their corresponding test scripts.
#
# same: It has 4 crossbars in HMC memory. All the crossbars are connected
# to each other, providing the complete memory range. This architecture also
# models the added latency of sending a request to a non-local vault (via a
# bridge between crossbars). All 4 serial links can access the complete
# memory, so each link can be connected to a separate processor.
#
# distributed: It has 4 crossbars inside the HMC. The crossbars are not
# connected. Through each crossbar only the local vaults can be accessed.
# To support this architecture, a crossbar is required between the serial
# links and the processor.
#
# mixed: This is a hybrid architecture. It has 4 crossbars inside the HMC.
# 2 crossbars are connected only to their local vaults. From the other 2
# crossbars, a request can be forwarded to any vault.
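#
# As a structural sketch only (all class names, parameters, and values
# below are assumptions based on this description and gem5's Python
# config API, and may not match the gem5 version in use):
#
#   from m5.objects import *
#
#   # one vault controller backed by HMC-like DRAM timings
#   vault = HMC_2500_1x32(range=AddrRange('64MB'))
#
#   # the main internal crossbar, latencies roughly tuned per [2]
#   xbar = NoncoherentXBar(width=32, frontend_latency=1,
#                          forward_latency=1, response_latency=1)
#
#   # one serial link: a Bridge variant adding serialization delay
#   link = SerialLink(ranges=[vault.range], delay='10ns')
#
#   # wire host -> link -> xbar -> vault
#   link.master = xbar.slave
#   xbar.master = vault.port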
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are noted.
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
>>> print scipy.special.bdtr(-1,10,0.3)
>>> scipy.special.errprint(1)
>>> print scipy.special.bdtr(-1,10,0.3)
.. autosummary::
:toctree: generated/
errprint
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of real-valued order and complex argument.
jn -- Alias for jv
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
besselpoly -- Integral of a Bessel function: Jv(2*a*x) * x**lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Sequence of Riccati-Bessel functions of first kind.
riccati_yn -- [+]Sequence of Riccati-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
bdtrik --
bdtrin --
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
btdtria --
btdtrib --
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
fdtridfd --
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria -- Inverse with respect to `a` of gdtr.
gdtrib -- Inverse with respect to `b` of gdtr.
gdtrix -- Inverse with respect to `x` of gdtr.
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
nbdtrik --
nbdtrin --
ncfdtr -- CDF of noncentral F distribution.
ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.
ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.
ncfdtri -- Inverse CDF of noncentral F distribution.
ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.
nctdtr -- CDF of noncentral t distribution.
nctdtridf -- Find degrees of freedom of noncentral t distribution.
nctdtrit -- Inverse CDF of noncentral t distribution.
nctdtrinc -- Find noncentrality parameter of noncentral t distribution.
nrdtrimn -- Find mean of normal distribution from cdf and std.
nrdtrisd -- Find std of normal distribution from cdf and mean.
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
pdtrik --
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
chdtrc -- Integral from x to infinity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
chdtriv --
ndtr -- Integral from -infinity to x of standard normal pdf
log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
chndtr --
chndtridf --
chndtrinc --
chndtrix --
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + x.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- entr(x) = -x*log(x)
rel_entr -- rel_entr(x, y) = x*log(x/y)
kl_div -- kl_div(x, y) = x*log(x/y) - x + y
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log of the absolute value of the gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln -- Log of the multivariate gamma.
digamma -- Digamma function (derivative of the logarithm of gamma).
poch -- The Pochhammer symbol (rising factorial).
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1 - erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind for real arguments.
lqmn -- [+]Associated Legendre Function of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic E
ellip_harm_2 -- Ellipsoidal harmonic F
ellip_normal -- Ellipsoidal normalization constant
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly to :class:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
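For example (a sketch; the printed values are omitted here):
>>> from scipy import special
>>> P3 = special.legendre(3)               # an orthopoly1d instance
>>> roots, weights, totals = P3.weights.T  # the three columns of the n x 3 array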
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (see lpn for function values).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Roots and weights for orthogonal polynomials
.. autosummary::
:toctree: generated/
c_roots
cg_roots
h_roots
he_roots
j_roots
js_roots
l_roots
la_roots
p_roots
ps_roots
s_roots
t_roots
ts_roots
u_roots
us_roots
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require a pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Zeros of Kelvin function ber x
bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
ker_zeros -- [+]Zeros of Kelvin function ker x
kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]Combinations of N things taken k at a time, "N choose k"
perm -- [+]Permutations of N things taken k at a time, "k-permutations of N"
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-Geometric Mean
bernoulli -- Bernoulli numbers
binom -- Binomial coefficient.
diric -- Dirichlet function (periodic sinc)
euler -- Euler numbers
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
factorial -- The factorial function, n! = special.gamma(n+1)
factorial2 -- Double factorial, n!! = n*(n-2)*(n-4)*...
factorialk -- [+]Multifactorial of n of order k, n*(n-k)*(n-2k)*...
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Dilogarithm integral.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
zetac -- Standard Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
xlogy -- x*log(y)
xlog1py -- x*log1p(y)
exprel -- (exp(x)-1)/x
sinc -- sin(x)/x
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
""" |
"""Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. because different button callbacks all call
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target object). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than doing it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
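As a minimal sketch (Python 2 module names; the DragSource class and
the Label are illustrative placeholders, not part of this module):
import Tkinter
import Tkdnd

class DragSource:
    # any application object can serve as the dragged 'source'
    def dnd_end(self, target, event):
        pass   # called when the drag-and-drop sequence is over

def on_press(event):
    Tkdnd.dnd_start(DragSource(), event)

root = Tkinter.Tk()
label = Tkinter.Label(root, text='drag me')
label.pack()
label.bind('<ButtonPress>', on_press)
root.mainloop()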
""" |
"""
# ggame
The simple cross-platform sprite and game platform for Brython Server (Pygame, Tkinter to follow?).
Ggame stands for a couple of things: "good game" (of course!) and also "git game" or "github game"
because it is designed to operate with [Brython Server](http://runpython.com) in concert with
Github as a backend file store.
Ggame is **not** intended to be a full-featured gaming API, with every bell and whistle. Ggame is
designed primarily as a tool for teaching computer programming, recognizing that the ability
to create engaging and interactive games is a powerful motivator for many programming students.
Accordingly, any functional or performance enhancements that *can* be reasonably implemented
by the user are left as an exercise.
## Functionality Goals
The ggame library is intended to be trivially easy to use. For example:
from ggame import App, ImageAsset, Sprite
# Create a displayed object at 100,100 using an image asset
Sprite(ImageAsset("ggame/bunny.png"), (100,100))
# Create the app, with a 500x500 pixel stage
app = App(500,500)
# Run the app
app.run()
## Overview
There are three major components to the `ggame` system: Assets, Sprites and the App.
### Assets
Asset objects (i.e. `ggame.ImageAsset`, etc.) typically represent separate files that
are provided by the "art department". These might be background images, user interface
images, or images that represent objects in the game. In addition, `ggame.SoundAsset`
is used to represent sound files (`.wav` or `.mp3` format) that can be played in the
game.
Ggame also extends the asset concept to include graphics that are generated dynamically
at run-time, such as geometrical objects, e.g. rectangles, lines, etc.
### Sprites
All of the visual aspects of the game are represented by instances of `ggame.Sprite` or
subclasses of it.
### App
Every ggame application must create a single instance of the `ggame.App` class (or
a sub-class of it). Creating an instance of the `ggame.App` class will initiate
creation of a pop-up window on your browser. Executing the app's `run` method will
begin the process of refreshing the visual assets on the screen.
### Events
No game is complete without a player and players produce events. Your code handles user
input by registering to receive keyboard and mouse events using `ggame.App.listenKeyEvent` and
`ggame.App.listenMouseEvent` methods.
## Execution Environment
Ggame is designed to be executed in a web browser using [Brython](http://brython.info/),
[Pixi.js](http://www.pixijs.com/) and [Buzz](http://buzz.jaysalvat.com/). The easiest
way to do this is by executing from [runpython](http://runpython.com), with source
code residing on [github](http://github.com).
When using [runpython](http://runpython.com), you will have to configure your browser
to allow popup windows.
To use Ggame in your own application, you will minimally need to create a folder called
`ggame` in your project. Within `ggame`, copy the `ggame.py`, `sysdeps.py` and
`__init__.py` files from the [ggame project](https://github.com/BrythonServer/ggame).
### Include Ggame as a Git Subtree
From the same directory as your own python sources (note: you must have an existing git
repository with committed files in order for the following to work properly),
execute the following terminal commands:
git remote add -f ggame https://github.com/BrythonServer/ggame.git
git merge -s ours --no-commit ggame/master
mkdir ggame
git read-tree --prefix=ggame/ -u ggame/master
git commit -m "Merge ggame project as our subdirectory"
If you want to pull in updates from ggame in the future:
git pull -s subtree ggame master
You can see an example of how a ggame subtree is used by examining the
[Brython Server Spacewar](https://github.com/BrythonServer/Spacewar) repo on Github.
## Geometry
When referring to screen coordinates, note that the x-axis of the computer screen
is *horizontal* with the zero position on the left hand side of the screen. The
y-axis is *vertical* with the zero position at the **top** of the screen.
Increasing positive y-coordinates correspond to the downward direction on the
computer screen. Note that this is **different** from the way you may have learned
about x and y coordinates in math class!
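For example, this sketch (reusing the bunny image from the example
above) places a sprite 100 pixels in from the left edge and 50 pixels
*down* from the top edge:
from ggame import App, ImageAsset, Sprite
Sprite(ImageAsset("ggame/bunny.png"), (100, 50))
App(500, 500).run()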
""" |
"""
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a
``ValueError: frames are not aligned`` exception is thrown, indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
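The broadcast shape can be checked without performing any arithmetic by
using ``np.broadcast`` (a small sketch)::
>>> np.broadcast(np.empty((5, 4)), np.empty(4)).shape
(5, 4)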
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
When either of the dimensions compared is one, the other is
used. In other words, dimensions with size 1 are stretched or "copied"
to match the other.
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
""" |
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
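For example, a minimal self-contained test using a few of the methods
above might look like this (a sketch only; it assumes an 'echo' program
is available on the PATH):
import TestCmd
test = TestCmd.TestCmd(program = 'echo', workdir = '')
test.run(arguments = 'hello')
test.fail_test(test.stdout() != 'hello\n')
test.pass_test()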
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
""" |
"""
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierarchical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random and so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
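As a minimal usage sketch (the 'dietox' example dataset is fetched
from the R 'geepack' package here purely for illustration):
import statsmodels.api as sm
import statsmodels.formula.api as smf
data = sm.datasets.get_rdataset('dietox', 'geepack').data
md = smf.mixedlm('Weight ~ Time', data, groups=data['Pig'])
mdf = md.fit()
print(mdf.summary())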
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
MJ NAME NAME (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. They are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Cholesky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
""" |
"""Instantiating Graphs with default store (IOMemory) and default identifier
(a BNode):
>>> g = Graph()
>>> g.store.__class__
<class 'rdflib.plugins.memory.IOMemory'>
>>> g.identifier.__class__
<class 'rdflib.term.BNode'>
Instantiating Graphs with a specific kind of store (IOMemory) and a default
identifier (a BNode):
Other store kinds: Sleepycat, MySQL, SQLite
>>> store = plugin.get('IOMemory', Store)()
>>> store.__class__.__name__
'IOMemory'
>>> graph = Graph(store)
>>> graph.store.__class__
<class 'rdflib.plugins.memory.IOMemory'>
Instantiating Graphs with an IOMemory store and an identifier -
<http://rdflib.net>:
>>> g = Graph('IOMemory', URIRef("http://rdflib.net"))
>>> g.identifier
rdflib.term.URIRef('http://rdflib.net')
>>> str(g)
"<http://rdflib.net> a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'IOMemory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
in a 'database':
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
"[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'IOMemory']]."
Adding / removing reified triples to Graph and iterating over it directly or
via triple pattern:
>>> g = Graph('IOMemory')
>>> statementId = BNode()
>>> print len(g)
0
>>> g.add((statementId, RDF.type, RDF.Statement))
>>> g.add((statementId, RDF.subject, URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g.add((statementId, RDF.predicate, RDFS.label))
>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph")))
>>> print len(g)
4
>>> for s, p, o in g:
... print type(s)
...
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
>>> for s, p, o in g.triples((None, RDF.object, None)):
... print o
...
Conjunctive Graph
>>> g.remove((statementId, RDF.type, RDF.Statement))
>>> print len(g)
3
None terms in calls to triple can be thought of as "open variables".
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
>>> store = plugin.get('IOMemory', Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
>>> stmt1 = BNode()
>>> stmt2 = BNode()
>>> stmt3 = BNode()
>>> g1.add((stmt1, RDF.type, RDF.Statement))
>>> g1.add((stmt1, RDF.subject, URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g1.add((stmt1, RDF.predicate, RDFS.label))
>>> g1.add((stmt1, RDF.object, Literal("Conjunctive Graph")))
>>> g2.add((stmt2, RDF.type, RDF.Statement))
>>> g2.add((stmt2, RDF.subject, URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g2.add((stmt2, RDF.predicate, RDF.type))
>>> g2.add((stmt2, RDF.object, RDFS.Class))
>>> g3.add((stmt3, RDF.type, RDF.Statement))
>>> g3.add((stmt3, RDF.subject, URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g3.add((stmt3, RDF.predicate, RDFS.comment))
>>> g3.add((stmt3, RDF.object, Literal("The top-level aggregate graph - The sum of all named graphs within a Store")))
>>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement)))
3
>>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects(RDF.type, RDF.Statement)))
2
ConjunctiveGraphs have a 'quads' method which returns quads instead of
triples, where the fourth item is the Graph (or subclass thereof) instance
in which the triple was asserted:
>>> uniqueGraphNames = set([graph.identifier for s, p, o, graph in ConjunctiveGraph(store).quads((None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
3
>>> unionGraph = ReadOnlyGraphAggregate([g1, g2])
>>> uniqueGraphNames = set([graph.identifier for s, p, o, graph in unionGraph.quads((None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
2
Parsing N3 from StringIO:
>>> g2 = Graph()
>>> src = '''
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... [ a rdf:Statement ;
... rdf:subject <http://rdflib.net/store#ConjunctiveGraph>;
... rdf:predicate rdfs:label;
... rdf:object "Conjunctive Graph" ] .
... '''
>>> g2 = g2.parse(StringIO(src), format='n3')
>>> print len(g2)
4
Using Namespace class:
>>> RDFLib = Namespace('http://rdflib.net')
>>> RDFLib.ConjunctiveGraph
rdflib.term.URIRef('http://rdflib.netConjunctiveGraph')
>>> RDFLib['Graph']
rdflib.term.URIRef('http://rdflib.netGraph')
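A Namespace can also be bound to a prefix on a graph, for use by
serializers (a brief sketch; the triple reuses the objects shown above):
>>> g = Graph()
>>> g.bind('rdflib', RDFLib)
>>> g.add((RDFLib['Graph'], RDFS.label, Literal('Graph')))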
""" |
# This test generates all variants of wmma intrinsics and verifies that LLVM
# generates correct instructions for them.
# Check all variants of instructions supported by PTX60 on SM70
# RUN: python %s --ptx=60 --gpu-arch=70 > %t-ptx60-sm_70.ll
# RUN: FileCheck %t-ptx60-sm_70.ll < %t-ptx60-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,M16N16
# RUN: FileCheck %t-ptx60-sm_70.ll < %t-ptx60-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,NOEXTGEOM,NOINT,NOSUBINT,NOMMA
# RUN: llc < %t-ptx60-sm_70.ll -march=nvptx64 -mcpu=sm_70 -mattr=+ptx60 \
# RUN: | FileCheck %t-ptx60-sm_70.ll
# Check all variants of instructions supported by PTX61 on SM70
# RUN: python %s --ptx=61 --gpu-arch=70 > %t-ptx61-sm_70.ll
# RUN: FileCheck %t-ptx61-sm_70.ll < %t-ptx61-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,M16N16,EXTGEOM
# RUN: FileCheck %t-ptx61-sm_70.ll < %t-ptx61-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,NOINT,NOSUBINT,NOMMA
# RUN: llc < %t-ptx61-sm_70.ll -march=nvptx64 -mcpu=sm_70 -mattr=+ptx61 \
# RUN: | FileCheck %t-ptx61-sm_70.ll
# Check all variants of instructions supported by PTX63 on SM72
# RUN: python %s --ptx=63 --gpu-arch=72 > %t-ptx63-sm_72.ll
# RUN: FileCheck %t-ptx63-sm_72.ll < %t-ptx63-sm_72.ll \
# RUN: --check-prefixes=INTRINSICS,M16N16,EXTGEOM,INT
# RUN: FileCheck %t-ptx63-sm_72.ll < %t-ptx63-sm_72.ll \
# RUN: --check-prefixes=INTRINSICS,NOSUBINT,NOMMA
# RUN: llc < %t-ptx63-sm_72.ll -march=nvptx64 -mcpu=sm_72 -mattr=+ptx63 \
# RUN: | FileCheck %t-ptx63-sm_72.ll
# Check all variants of instructions supported by PTX63 on SM75
# RUN: python %s --ptx=63 --gpu-arch=75 > %t-ptx63-sm_75.ll
# RUN: FileCheck %t-ptx63-sm_75.ll < %t-ptx63-sm_75.ll \
# RUN: --check-prefixes=INTRINSICS,M16N16,EXTGEOM,INT,SUBINT
# RUN: FileCheck %t-ptx63-sm_75.ll < %t-ptx63-sm_75.ll \
# RUN: --check-prefixes=INTRINSICS,NOMMA
# RUN: llc < %t-ptx63-sm_75.ll -march=nvptx64 -mcpu=sm_75 -mattr=+ptx63 \
# RUN: | FileCheck %t-ptx63-sm_75.ll
# Check all variants of instructions supported by PTX64 on SM70+
# RUN: python %s --ptx=64 --gpu-arch=70 > %t-ptx64-sm_70.ll
# RUN: FileCheck %t-ptx64-sm_70.ll < %t-ptx64-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,M16N16,EXTGEOM,MMA
# RUN: FileCheck %t-ptx64-sm_70.ll < %t-ptx64-sm_70.ll \
# RUN: --check-prefixes=INTRINSICS,NOINT,NOSUBINT
# RUN: llc < %t-ptx64-sm_70.ll -march=nvptx64 -mcpu=sm_70 -mattr=+ptx64 \
# RUN: | FileCheck %t-ptx64-sm_70.ll
|
"""
Django model for OAS.
CREATE DATABASE IF NOT EXISTS `accounting`
CHARACTER SET utf8 COLLATE utf8_general_ci;
USE `accounting`;
-- at this point run manage.py syncdb
DROP TABLE IF EXISTS `oas_template_journal_entry`;
DROP TABLE IF EXISTS `oas_template_journal_entry_group`;
DROP TABLE IF EXISTS `oas_template_name`;
DROP TABLE IF EXISTS `oas_initial_amount`;
DROP TABLE IF EXISTS `oas_internal_investment`;
DROP TABLE IF EXISTS `oas_journal_entry`;
DROP TABLE IF EXISTS `oas_journal_entry_group`;
DROP TABLE IF EXISTS `oas_account`;
DROP TABLE IF EXISTS `oas_account_type`;
DROP TABLE IF EXISTS `oas_accounting_period`;
DROP TABLE IF EXISTS `oas_legal_entity`;
DROP TABLE IF EXISTS `oas_currency`;
CREATE TABLE `oas_account_type` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(1) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_currency` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(3) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_legal_entity` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL,
`currency_id` int(11) NOT NULL,
`code` varchar(32) NOT NULL,
`name` varchar(64) NOT NULL,
`description` longtext,
`is_individual` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`),
KEY `user_id` (`user_id`),
KEY `currency_id` (`currency_id`),
CONSTRAINT `legal_entity_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `legal_entity_ibfk_2` FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_account` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(32) NOT NULL,
`name` varchar(192) NOT NULL,
`description` longtext,
`account_type_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`user_id` int(11) NOT NULL,
`parent_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`code`, `legal_entity_id`),
UNIQUE KEY (`name`, `legal_entity_id`),
KEY `account_type_id` (`account_type_id`),
KEY `legal_entity_id` (`legal_entity_id`),
KEY `user_id` (`user_id`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `account_ibfk_1` FOREIGN KEY (`account_type_id`) REFERENCES `oas_account_type` (`id`),
CONSTRAINT `account_ibfk_2` FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`),
CONSTRAINT `account_ibfk_3` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `account_ibfk_4` FOREIGN KEY (`parent_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_accounting_period` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(128) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`till_date` datetime NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`name`,`legal_entity_id`),
UNIQUE KEY (`till_date`,`legal_entity_id`),
KEY (`legal_entity_id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`date` datetime NOT NULL,
`description` longtext NULL,
`currency_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `currency_id` (`currency_id`),
KEY `accounting_period_id` (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext NULL,
`ref_num` int(11) NULL,
`account_id` int(11) NOT NULL,
`is_debit` tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `account_id` (`account_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`group_id`) REFERENCES `oas_journal_entry_group` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_internal_investment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_asset_id` int(11) NOT NULL,
`account_liability_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`account_asset_id`,`account_liability_id`),
UNIQUE KEY (`account_asset_id`),
KEY (`account_asset_id`),
KEY (`account_liability_id`),
CONSTRAINT FOREIGN KEY (`account_asset_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`account_liability_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_initial_amount` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
PRIMARY KEY (`id`),
UNIQUE KEY (`account_id`,`accounting_period_id`),
KEY (`account_id`),
KEY (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(96) NOT NULL,
`description` longtext,
`template_currency_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`template_currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`),
`template_name_id` int(11) NOT NULL,
CONSTRAINT FOREIGN KEY (`template_name_id`) REFERENCES `oas_template_name` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext,
`account_id` int(11) NOT NULL,
`is_debit` tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`template_group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`template_group_id`) REFERENCES `oas_template_journal_entry_group` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
INSERT INTO oas_account_type (id,code,name) VALUES (1,'A','Asset');
INSERT INTO oas_account_type (id,code,name) VALUES (2,'L','Liability & Equity');
INSERT INTO oas_account_type (id,code,name) VALUES (3,'I','Income');
INSERT INTO oas_account_type (id,code,name) VALUES (4,'E','Expense');
INSERT INTO oas_currency (code, name) VALUES ('USD', 'US Dollar');
INSERT INTO oas_currency (code, name) VALUES ('GBP', 'Sterling');
INSERT INTO oas_currency (code, name) VALUES ('CHF', 'Swiss Franc');
INSERT INTO oas_currency (code, name) VALUES ('EUR', 'Euro');
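The Django model corresponding to, e.g., `oas_account_type` would look
roughly like the following sketch (field names inferred from the DDL
above; illustrative only, not the project's actual model code):
    from django.db import models
    class AccountType(models.Model):
        code = models.CharField(max_length=1, unique=True)
        name = models.CharField(max_length=64, unique=True)
        class Meta:
            db_table = 'oas_account_type'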
""" |
"""
==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] = x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. Numpy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrate this best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
Numpy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
creates an array of length 4 (same as the index array) where each index
is replaced by the value the index array has in the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are within the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
The result is a 1-D array containing all the elements in the indexed
array corresponding to all the true elements in the boolean array. As
with index arrays, what is returned is a copy of the data, not a view
as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is because
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
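If the accumulated behavior is what is wanted, newer versions of numpy
(1.8 and later) provide unbuffered in-place operations via
``np.ufunc.at``: ::
>>> x = np.arange(0, 50, 10)
>>> np.add.at(x, [1, 1, 3, 1], 1)
>>> x
array([ 0, 13, 20, 31, 40])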
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
""" |
# #!/usr/bin/env python
# # -*- coding:utf-8 -*-
#
# __author__ = 'hiroki'
#
# import uuid
# import logging
# import os
#
# from bottle_auth.core.auth import FacebookGraphMixin, HTTPRedirect
#
# from kokemomo import app
# from bottle import template, route, static_file, url, request, response, redirect
# from kokemomo.plugins.engine.controller.km_login import auth, RESULT_SUCCESS
# from kokemomo.plugins.engine.controller.km_session_manager import add_value_to_session
# from bottle.ext import auth
# from bottle.ext.auth.decorator import login
# from bottle.ext.auth.social.facebook import Facebook, UserDenied
# from bottle.ext.auth.social.facebook import NegotiationError
# from pprint import pformat
# from urllib2 import Request,urlopen
# from bottle_auth.core.auth import FacebookGraphMixin, HTTPRedirect
#
# """
# This is the login screen for Facebook login.
#
# """
#
#
# CLIENT_ID = 'app-id' # TODO: extract to a configuration file
# REDIRECT_URI='redirect-uri'
# SECRET_KEY = 'secret-key'
# EMAIL = 'e-mail'
# DATA_DIR_PATH = "./kokemomo/data/test/"  # TODO: this depends on where it is run; define HOME in an external file instead
#
# facebook = Facebook(CLIENT_ID, SECRET_KEY,
# REDIRECT_URI, EMAIL)
#
# plugin = auth.AuthPlugin(facebook)
# app.install(plugin)
#
#
# RESULT_SUCCESS = "SUCCESS"
# RESULT_FAIL = "FAIL"
#
# @route('/fb_login/js/<filename>', name='fb_login_static_js')
# def fb_login_js_static(filename):
# """
# set javascript files.
# :param filename: javascript file name.
# :return: static path.
# """
# return static_file(filename, root='kokemomo/plugins/fb_login/view/resource/js')
#
#
# @route('/fb_login/css/<filename>', name='fb_login_static_css')
# def fb_login_css_static(filename):
# """
# set css files.
# :param filename: css file name.
# :return: static path.
# """
# return static_file(filename, root='kokemomo/plugins/fb_login/view/resource/css')
#
#
# @route('/fb_login/img/<filename>', name='fb_login_static_img')
# def fb_login_img_static(filename):
# """
# set image files.
# :param filename: image file name.
# :return: static path.
# """
# return static_file(filename, root='kokemomo/plugins/fb_login/view/resource/img')
#
# @app.route('/fb_login')
# def fb_login_top(auth):
# return template('kokemomo/plugins/fb_login/view/login', url=url) # TODO: fix path resolution
#
# @app.route('/fb_login_auth')
# def fb_login_auth():
# auth = FacebookGraphMixin(request.environ)
# try:
# auth.authorize_redirect(
# redirect_uri=facebook.callback_url,
# client_id=facebook.settings['facebook_api_key'])
# # extra_params={'scope': facebook.scope}) # TODO: EMAIL currently ends up in the scope and causes a scope error; investigate whether this is a bottle-auth bug
# except HTTPRedirect, e:
# logging.info('Redirecting Facebook user to {0}'.format(e.url))
# return redirect(e.url)
# return None
#
# @app.route('/fb_engine/<filename>')
# def fb_engine(filename):
# # TODO: move authentication and related processing to the controller
# auth = FacebookGraphMixin(request.environ)
# container = {}
# try:
# code = request.params['code']
# except KeyError as e:
# return "<p>Please login!</p>" # TODO: 例外スロー時にエラー画面に遷移するようにする
# result = RESULT_FAIL
# if code == '':
# code = get_fb_code()
# code = str(code) + '#_=_'
# def get_user_callback(user):
# container['id'] = user['id']
#
# auth.get_authenticated_user(
# redirect_uri=facebook.callback_url,
# client_id=facebook.settings['facebook_api_key'],
# client_secret=facebook.settings['facebook_secret'],
# code=code,
# callback=get_user_callback)
#
# if container['id'] != '':
# create_session(request, response, container['id'])
# result = RESULT_SUCCESS
#
# if result == RESULT_FAIL: # TODO: the check logic and the page-transition handling need to be unified (for normal login and Facebook login)
# return "<p>Please login!</p>" # TODO: 例外スロー時にエラー画面に遷移するようにする
# if filename == "file":
# dir_list = []
# for (root, dirs, files) in os.walk(DATA_DIR_PATH):
# for dir_name in dirs:
# dir_path = root + os.sep + dir_name
# dir_list.append(dir_path[len(DATA_DIR_PATH):])
# files = os.listdir(DATA_DIR_PATH + dir_list[0])
# for file_name in files:
# if os.path.isdir(DATA_DIR_PATH + os.sep + dir_list[0] + os.sep + file_name):
# files.remove(file_name)
# return template('kokemomo/plugins/engine/view/file', dirs=dir_list, files=files, url=url) # TODO: rework path resolution
# else:
# return template('kokemomo/plugins/engine/view/' + filename, url=url) # TODO: rework path resolution
#
#
# def create_session(request, response, id):
# result = RESULT_FAIL
# add_value_to_session(request, 'fb_code', id)
# return result
#
# def get_fb_code():
# session = request.environ.get('beaker.session')
# code = session.get('fb_code') # fetch the stored code (the original 'in' test returned a boolean)
# return code
|
"""
==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] = x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. NumPy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrate this best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data. This is different from
list or tuple slicing and an explicit ``copy()`` is recommended if
the original data is not required anymore.
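For example, writing through a slice modifies the original array,
since the slice is only a view: ::
>>> x = np.arange(10)
>>> s = x[2:5]
>>> s[0] = 99
>>> x
array([ 0,  1, 99,  3,  4,  5,  6,  7,  8,  9])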
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
NumPy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
creates an array of length 4 (same as the index array) where each index
is replaced by the value the index array has in the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are within the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
Unlike in the case of integer index arrays, in the boolean case, the
result is a 1-D array containing all the elements in the indexed array
corresponding to all the true elements in the boolean array. The
elements in the indexed array are always iterated and returned in
:term:`row-major` (C-style) order. The result is also identical to
``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
of the data, not a view as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True])
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> b = y > 20
>>> b
array([[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[ True, True, True, True, True, True, True],
[ True, True, True, True, True, True, True]])
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is because
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.nonzero()
function directly as an index since it always returns a tuple of index
arrays.
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
""" |
"""
Signal Processing Tools
=======================
Convolution:
convolve:
N-dimensional convolution.
correlate:
N-dimensional correlation.
fftconvolve:
N-dimensional convolution using the FFT.
convolve2d:
2-dimensional convolution (more options).
correlate2d:
2-dimensional correlation (more options).
sepfir2d:
Convolve with a 2-D separable FIR filter.
B-splines:
bspline:
B-spline basis function of order n.
gauss_spline:
Gaussian approximation to the B-spline basis function.
cspline1d:
Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d:
Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d:
Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d:
Coefficients for 2-D quadratic (2nd order) B-spline.
spline_filter:
Smoothing spline (cubic) filtering of a rank-2 array.
Filtering:
order_filter:
N-dimensional order filter.
medfilt:
N-dimensional median filter.
medfilt2d:
2-dimensional median filter (faster).
wiener:
N-dimensional Wiener filter.
symiirorder1:
2nd-order IIR filter (cascade of first-order systems).
symiirorder2:
4th-order IIR filter (cascade of second-order systems).
lfilter:
1-dimensional FIR and IIR digital linear filtering.
lfiltic:
Construct initial conditions for `lfilter`.
deconvolve:
1-d deconvolution using lfilter.
hilbert:
Compute the analytic signal of a 1-d signal.
get_window:
Create FIR window.
decimate:
Downsample a signal.
detrend:
Remove linear and/or constant trends from data.
resample:
Resample using Fourier method.
Filter design:
bilinear:
Return a digital filter from an analog filter using the bilinear transform.
firwin:
Windowed FIR filter design, with frequency response defined as pass and stop bands.
firwin2:
Windowed FIR filter design, with arbitrary frequency response.
freqs:
Analog filter frequency response.
freqz:
Digital filter frequency response.
iirdesign:
IIR filter design given bands and gains.
iirfilter:
IIR filter design given order and critical frequencies.
invres:
Inverse partial fraction expansion.
kaiser_beta:
Compute the Kaiser parameter beta, given the desired FIR filter attenuation.
kaiser_atten:
Compute the attenuation of a Kaiser FIR filter, given the number of taps
and the transition width at discontinuities in the frequency response.
kaiserord:
Design a Kaiser window to limit ripple and width of transition region.
remez:
Optimal FIR filter design.
residue:
Partial fraction expansion of b(s) / a(s).
residuez:
Partial fraction expansion of b(z) / a(z).
unique_roots:
Unique roots and their multiplicities.
Matlab-style IIR filter design:
butter (buttord):
Butterworth
cheby1 (cheb1ord):
Chebyshev Type I
cheby2 (cheb2ord):
Chebyshev Type II
ellip (ellipord):
Elliptic (Cauer)
bessel:
Bessel (no order selection available -- try buttord)
Linear Systems:
lti:
linear time invariant system object.
lsim:
continuous-time simulation of output to linear system.
lsim2:
like lsim, but `scipy.integrate.odeint` is used.
impulse:
impulse response of linear, time-invariant (LTI) system.
impulse2:
like impulse, but `scipy.integrate.odeint` is used.
step:
step response of continuous-time LTI system.
step2:
like step, but `scipy.integrate.odeint` is used.
LTI Representations:
tf2zpk:
transfer function to zero-pole-gain.
zpk2tf:
zero-pole-gain to transfer function.
tf2ss:
transfer function to state-space.
ss2tf:
state-space to transfer function.
zpk2ss:
zero-pole-gain to state-space.
ss2zpk:
state-space to zero-pole-gain.
Waveforms:
sawtooth:
Periodic sawtooth
square:
Square wave
gausspulse:
Gaussian modulated sinusoid
chirp:
Frequency swept cosine signal, with several frequency functions.
sweep_poly:
Frequency swept cosine signal; frequency is arbitrary polynomial.
Window functions:
get_window:
Return a window of a given length and type.
barthann:
Bartlett-Hann window
bartlett:
Bartlett window
blackman:
Blackman window
blackmanharris:
Minimum 4-term Blackman-Harris window
bohman:
Bohman window
boxcar:
Boxcar window
chebwin:
Dolph-Chebyshev window
flattop:
Flat top window
gaussian:
Gaussian window
general_gaussian:
Generalized Gaussian window
hamming:
Hamming window
hann:
Hann window
kaiser:
Kaiser window
nuttall:
Nuttall's minimum 4-term Blackman-Harris window
parzen:
Parzen window
slepian:
Slepian window
triang:
Triangular window
Wavelets:
daub:
return low-pass filter coefficients (Daubechies).
qmf:
return quadrature mirror filter from low-pass
cascade:
compute scaling function and wavelet from coefficients
morlet:
Complex Morlet wavelet.
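A brief usage sketch (illustrative only): a windowed low-pass FIR
filter can be designed with `firwin` and applied with `lfilter`:
    import numpy as np
    from scipy.signal import firwin, lfilter
    b = firwin(64, 0.2)        # 64 taps, cutoff at 0.2 of the Nyquist rate
    x = np.random.randn(1000)  # example input signal
    y = lfilter(b, [1.0], x)   # FIR filtering: denominator is just 1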
""" |
"""
# ggame
The simple cross-platform sprite and game platform for Brython Server (Pygame, Tkinter to follow?).
Ggame stands for a couple of things: "good game" (of course!) and also "git game" or "github game"
because it is designed to operate with [Brython Server](http://runpython.com) in concert with
Github as a backend file store.
Ggame is **not** intended to be a full-featured gaming API, with every bell and whistle. Ggame is
designed primarily as a tool for teaching computer programming, recognizing that the ability
to create engaging and interactive games is a powerful motivator for many programming students.
Accordingly, any functional or performance enhancements that *can* be reasonably implemented
by the user are left as an exercise.
## Functionality Goals
The ggame library is intended to be trivially easy to use. For example:
    from ggame import App, ImageAsset, Sprite
    # Create a displayed object at 100,100 using an image asset
    Sprite(ImageAsset("ggame/bunny.png"), (100,100))
    # Create the app, with a 500x500 pixel stage
    app = App(500,500)
    # Run the app
    app.run()
## Overview
There are three major components to the `ggame` system: Assets, Sprites and the App.
### Assets
Asset objects (i.e. `ggame.ImageAsset`, etc.) typically represent separate files that
are provided by the "art department". These might be background images, user interface
images, or images that represent objects in the game. In addition, `ggame.SoundAsset`
is used to represent sound files (`.wav` or `.mp3` format) that can be played in the
game.
Ggame also extends the asset concept to include graphics that are generated dynamically
at run-time, such as geometrical objects, e.g. rectangles, lines, etc.
### Sprites
All of the visual aspects of the game are represented by instances of `ggame.Sprite` or
subclasses of it.
### App
Every ggame application must create a single instance of the `ggame.App` class (or
a sub-class of it). Creating an instance of the `ggame.App` class will initiate
creation of a pop-up window on your browser. Executing the app's `run` method will
begin the process of refreshing the visual assets on the screen.
### Events
No game is complete without a player and players produce events. Your code handles user
input by registering to receive keyboard and mouse events using `ggame.App.listenKeyEvent` and
`ggame.App.listenMouseEvent` methods.
## Execution Environment
Ggame is designed to be executed in a web browser using [Brython](http://brython.info/),
[Pixi.js](http://www.pixijs.com/) and [Buzz](http://buzz.jaysalvat.com/). The easiest
way to do this is by executing from [runpython](http://runpython.com), with source
code residing on [github](http://github.com).
When using [runpython](http://runpython.com), you will have to configure your browser
to allow popup windows.
To use Ggame in your own application, you will minimally need to create a folder called
`ggame` in your project. Within `ggame`, copy the `ggame.py`, `sysdeps.py` and
`__init__.py` files from the [ggame project](https://github.com/BrythonServer/ggame).
### Include Ggame as a Git Subtree
From the same directory as your own python sources (note: you must have an existing git
repository with committed files in order for the following to work properly),
execute the following terminal commands:
    git remote add -f ggame https://github.com/BrythonServer/ggame.git
    git merge -s ours --no-commit ggame/master
    mkdir ggame
    git read-tree --prefix=ggame/ -u ggame/master
    git commit -m "Merge ggame project as our subdirectory"
If you want to pull in updates from ggame in the future:
    git pull -s subtree ggame master
You can see an example of how a ggame subtree is used by examining the
[Brython Server Spacewar](https://github.com/BrythonServer/Spacewar) repo on Github.
## Geometry
When referring to screen coordinates, note that the x-axis of the computer screen
is *horizontal* with the zero position on the left hand side of the screen. The
y-axis is *vertical* with the zero position at the **top** of the screen.
Increasing positive y-coordinates correspond to the downward direction on the
computer screen. Note that this is **different** from the way you may have learned
about x and y coordinates in math class!
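For example, using only the calls shown earlier, this minimal sketch
places a sprite at the screen origin, which is the **top-left** corner
of the stage (not the bottom-left, as in math class):

    from ggame import App, ImageAsset, Sprite
    # (0,0) is the top-left corner; y increases downward
    Sprite(ImageAsset("ggame/bunny.png"), (0,0))
    app = App(500,500)
    app.run()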
""" |
"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
============ =================================================================
``pi`` Pi
``golden`` Golden ratio
============ =================================================================
Physical constants
==================
============= =================================================================
``c`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``R`` molar gas constant
``alpha`` fine-structure constant
``N_A`` Avogadro constant
``k`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``m_p`` proton mass
``m_n`` neutron mass
============= =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2010 CODATA recommended values [CODATA2010]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
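A brief illustrative session with these helpers (the values shown correspond to
the 2010 CODATA set and are indicative only):
>>> from scipy import constants
>>> constants.value('electron mass')   # doctest: +SKIP
9.10938291e-31
>>> constants.unit('electron mass')    # doctest: +SKIP
'kg'
>>> constants.find('boltzmann')        # doctest: +SKIP
['Boltzmann constant', ...]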
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb``            one pound (avoirdupois) in kg
``oz`` one ounce in kg
``stone`` one stone in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcsec`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
================= ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
================= ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================= ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================= ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
C2K
K2C
F2C
C2F
F2K
K2F
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``lbf`` one pound force in newtons
``kgf`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
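For example (the wavelength is illustrative and the printed values approximate):
>>> from scipy.constants import lambda2nu, nu2lambda
>>> lambda2nu(1550e-9)   # doctest: +SKIP
193414489032258.06
>>> nu2lambda(193414489032258.06)   # doctest: +SKIP
1.55e-06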
References
==========
.. [CODATA2010] CODATA Recommended Values of the Fundamental
Physical Constants 2010.
http://physics.nist.gov/cuu/Constants/index.html
""" |
# -*- coding: utf-8 -*-
#ABADIA NAME CAMPO 31-00302
#ACAIACA 31-00401
#ACUCENA 31-00500
#AGUA BOA 31-00609
#AGUA COMPRIDA 31-00708
#AGUANIL 31-00807
#AGUAS FORMOSAS 31-00906
#AGUAS VERMELHAS 31-01003
#AIMORES 31-01102
#AIURUOCA 31-01201
#ALAGOA 31-01300
#ALBERTINA 31-01409
#ALEM NAME 31-01508
#ALFENAS 31-01607
#ALFREDO NAME 31-01631
#ALMENARA 31-01706
#ALPERCATA 31-01805
#ALPINOPOLIS 31-01904
#ALTEROSA 31-02001
#ALTO CAPARAO 31-02050
#ALTO JEQUITIBA 31-53509
#ALTO RIO DOCE 31-02100
#ALVARENGA 31-02209
#ALVINOPOLIS 31-02308
#ALVORADA DE MINAS 31-02407
#AMPARO DO SERRA 31-02506
#ANDRADAS 31-02605
#ANDRELANDIA 31-02803
#ANGELANDIA 31-02852
#NAME NAME 31-02902
#NAME NAME 31-03009
#NAME PRADO DE MINAS 31-03108
#ARACAI 31-03207
#ARACITABA 31-03306
#ARACUAI 31-03405
#ARAGUARI 31-03504
#ARANTINA 31-03603
#ARAPONGA 31-03702
#ARAPORA 31-03751
#ARAPUA 31-03801
#ARAUJOS 31-03900
#ARAXA 31-04007
#ARCEBURGO 31-04106
#ARCOS 31-04205
#AREADO 31-04304
#ARGIRITA 31-04403
#ARICANDUVA 31-04452
#ARINOS 31-04502
#ASTOLFO NAME 31-04601
#ATALEIA 31-04700
#AUGUSTO DE LIMA 31-04809
#BAEPENDI 31-04908
#BALDIM 31-05004
#BAMBUI 31-05103
#BANDEIRA 31-05202
#BANDEIRA DO SUL 31-05301
#BARAO DE COCAIS 31-05400
#BARAO DE MONTE ALTO 31-05509
#BARBACENA 31-05608
#BARRA LONGA 31-05707
#BARROSO 31-05905
#BELA VISTA DE MINAS 31-06002
#BELMIRO BRAGA 31-06101
#BELO HORIZONTE 31-06200
#BELO ORIENTE 31-06309
#BELO VALE 31-06408
#BERILO 31-06507
#BERIZAL 31-06655
#BETIM 31-06705
#BERTOPOLIS 31-06606
#BIAS FORTES 31-06804
#BICAS 31-06903
#BIQUINHAS 31-07000
#BOA ESPERANCA 31-07109
#BOCAINA DE MINAS 31-07208
#BOCAIUVA 31-07307
#BOM DESPACHO 31-07406
#BOM JARDIM DE MINAS 31-07505
#BOM NAME 31-07604
#BOM NAME 31-07703
#BOM NAME 31-07802
#BOM REPOUSO 31-07901
#BOM SUCESSO 31-08008
#BONFIM 31-08107
#BONFINOPOLIS DE MINAS 31-08206
#BONITO DE MINAS 31-08255
#BORDA DA MATA 31-08305
#BOTELHOS 31-08404
#BOTUMIRIM 31-08503
#BRASILANDIA DE MINAS 31-08552
#BRASILIA DE MINAS 31-08602
#BRASOPOLIS 31-08909
#BRAS PIRES 31-08701
#BRAUNAS 31-08800
#BRUMADINHO 31-09006
#BUENO NAME 31-09105
#BUENOPOLIS 31-09204
#BUGRE 31-09253
#BURITIS 31-09303
#BURITIZEIRO 31-09402
#CABECEIRA NAME 31-09451
#CABO NAME 31-09501
#CACHOEIRA DA PRATA 31-09600
#CACHOEIRA DE MINAS 31-09709
#CACHOEIRA DOURADA 31-09808
#CACHOEIRA DE PAJEU 31-02704
#CAETANOPOLIS 31-09907
#CAETE 31-10004
#CAIANA 31-10103
#CAJURI 31-10202
#NAME 31-10301
#CAMACHO 31-10400
#CAMANDUCAIA 31-10509
#CAMBUI 31-10608
#CAMBUQUIRA 31-10707
#CAMPANARIO 31-10806
#CAMPANHA 31-10905
#CAMPESTRE 31-11002
#CAMPINA NAME 31-11101
#CAMPO AZUL 31-11150
#CAMPO BELO 31-11200
#CAMPO DO MEIO 31-11309
#CAMPO FLORIDO 31-11408
#NAME ALTOS 31-11507
#NAME GERAIS 31-11606
#CANAA 31-11705
#CANAPOLIS 31-11804
#CANA NAME 31-11903
#CANDEIAS 31-12000
#CANTAGALO 31-12059
#CAPARAO 31-12109
#CAPELA NOVA 31-12208
#CAPELINHA 31-12307
#CAPETINGA 31-12406
#CAPIM NAME 31-12505
#CAPINOPOLIS 31-12604
#CAPITAO ANDRADE 31-12653
#CAPITAO ENEAS 31-12703
#CAPITOLIO 31-12802
#CAPUTIRA 31-12901
#CARAI 31-13008
#CARANAIBA 31-13107
#CARANDAI 31-13206
#CARANGOLA 31-13305
#CARATINGA 31-13404
#CARNAME 31-13503
#CAREACU 31-13602
#NAME CHAGAS 31-13701
#CARMESIA 31-13800
#CARMO DA CACHOEIRA 31-13909
#CARMO DA MATA 31-14006
#CARMO DE MINAS 31-14105
#CARMO DO CAJURU 31-14204
#CARMO DO PARANAIBA 31-14303
#CARMO DO RIO CLARO 31-14402
#CARMOPOLIS DE MINAS 31-14501
#CARNEIRINHO 31-14550
#CARRANCAS 31-14600
#CARVALHOPOLIS 31-14709
#CARVALHOS 31-14808
#CASA NAME 31-14907
#CASCALHO NAME 31-15003
#CASSIA 31-15102
#CATAGUASES 31-15300
#CATAS ALTAS 31-15359
#CATAS ALTAS DA NORUEGA 31-15409
#CATUJI 31-15458
#CATUTI 31-15474
#CAXAMBU 31-15508
#CEDRO DO ABAETE 31-15607
#CENTRAL DE MINAS 31-15706
#CENTRALINA 31-15805
#CHACARA 31-15904
#CHALE 31-16001
#CHAPADA DO NORTE 31-16100
#CHAPADA GAUCHA 31-16159
#CHIADOR 31-16209
#CIPOTANEA 31-16308
#CLARAVAL 31-16407
#CLARO DOS POCOES 31-16506
#CLAUDIO 31-16605
#COIMBRA 31-16704
#COLUNA 31-16803
#COMENDADOR GOMES 31-16902
#COMERCINHO 31-17009
#CONCEICAO DA APARECIDA 31-17108
#CONCEICAO DA BARRA DE MINAS 31-15201
#CONCEICAO DAS ALAGOAS 31-17306
#CONCEICAO DAS PEDRAS 31-17207
#CONCEICAO DE IPANEMA 31-17405
#CONCEICAO DO MATO NAME 31-17504
#CONCEICAO DO PARA 31-17603
#CONCEICAO DO RIO NAME 31-17702
#CONCEICAO DOS OUROS 31-17801
#CONEGO MARINHO 31-17836
#CONFINS 31-17876
#CONGONHAL 31-17900
#CONGONHAS 31-18007
#CONGONHAS DO NORTE 31-18106
#CONQUISTA 31-18205
#CONSELHEIRO LAFAIETE 31-18304
#CONSELHEIRO PENA 31-18403
#CONSOLACAO 31-18502
#CONTAGEM 31-18601
#COQUEIRAL 31-18700
#NAMEACAO NAME 31-18809
#NAMEDISBURGO 31-18908
#NAMEDISLANDIA 31-19005
#NAMEINTO 31-19104
#NAMEOACI 31-19203
#NAMEOMANDEL 31-19302
#NAMEONEL NAME 31-19401
#NAMEONEL NAME 31-19500
#NAMEONEL NAME 31-19609
#NAMEONEL NAME 31-19708
#NAMEREGO NAME 31-19807
#NAMEREGO NAME 31-19906
#NAMEREGO NAME 31-19955
#NAMEREGO NAME 31-20003
#COUTO NAME 31-20102
#CRISOLITA 31-20151
#CRISTAIS 31-20201
#CRISTALIA 31-20300
#CRISTIANO NAME 31-20409
#CRISTINA 31-20508
#CRUCILANDIA 31-20607
#CRUZEIRO DA FORTALEZA 31-20706
#CRUZILIA 31-20805
#CUPARAQUE 31-20839
#CURRAL DE NAME 31-20870
#CURVELO 31-20904
#DATAS 31-21001
#DELFIM NAME 31-21100
#DELFINOPOLIS 31-21209
#DELTA 31-21258
#DESCOBERTO 31-21308
#DESTERRO DE ENTRE RIOS 31-21407
#DESTERRO DO MELO 31-21506
#DIAMANTINA 31-21605
#DIOGO DE NAME 31-21704
#DIONISIO 31-21803
#DIVINESIA 31-21902
#DIVINO 31-22009
#DIVINO DAS LARANJEIRAS 31-22108
#DIVINOLANDIA DE MINAS 31-22207
#DIVINOPOLIS 31-22306
#DIVISA NAME 31-22355
#DIVISA NOVA 31-22405
#DIVISOPOLIS 31-22454
#DOM BOSCO 31-22470
#DOM CAVATI 31-22504
#DOM JOAQUIM 31-22603
#DOM SILVERIO 31-22702
#DOM VICOSO 31-22801
#DONA EUSEBIA 31-22900
#DORES DE NAME 31-23007
#DORES DE GUANHAES 31-23106
#DORES DO INDAIA 31-23205
#DORES DO TURVO 31-23304
#DORESOPOLIS 31-23403
#DOURADOQUARA 31-23502
#DURANDE 31-23528
#ELOI NAME 31-23601
#ENGENHEIRO NAME 31-23700
#ENGENHEIRO NAME 31-23809
#ENTRE FOLHAS 31-23858
#ENTRE RIOS DE MINAS 31-23908
#ERVALIA 31-24005
#ESMERALDAS 31-24104
#ESPERA FELIZ 31-24203
#ESPINOSA 31-24302
#ESPIRITO SANTO DO DOURADO 31-24401
#ESTIVA 31-24500
#ESTRELA DALVA 31-24609
#ESTRELA DO INDAIA 31-24708
#ESTRELA DO SUL 31-24807
#EUGENOPOLIS 31-24906
#EWBANK DA CAMARA 31-25002
#EXTREMA 31-25101
#FAMA 31-25200
#FARIA LEMOS 31-25309
#NAME DOS SANTOS 31-25408
#FELISBURGO 31-25606
#FELIXLANDIA 31-25705
#FERNANDES NAME 31-25804
#FERROS 31-25903
#FERVEDOURO 31-25952
#FLORESTAL 31-26000
#FORMIGA 31-26109
#FORMOSO 31-26208
#FORTALEZA DE MINAS 31-26307
#FORTUNA DE MINAS 31-26406
#FRANCISCO NAME 31-26505
#FRANCISCO NAME 31-26604
#FRANCISCO SA 31-26703
#FRANCISCOPOLIS 31-26752
#FREI GASPAR 31-26802
#FREI INOCENCIO 31-26901
#FREI LAGONEGRO 31-26950
#FRONTEIRA 31-27008
#FRONTEIRA DOS VALES 31-27057
#FRUTA DE LEITE 31-27073
#FRUTAL 31-27107
#FUNILANDIA 31-27206
#GALILEIA 31-27305
#GAMELEIRAS 31-27339
#GLAUCILANDIA 31-27354
#GOIABEIRA 31-27370
#GOIANA 31-27388
#NAME 31-27404
#GONZAGA 31-27503
#GOUVEA 31-27602
#GOVERNADOR VALADARES 31-27701
#GRAO MOGOL 31-27800
#GRUPIARA 31-27909
#GUANHAES 31-28006
#GUAPE 31-28105
#GUARACIABA 31-28204
#GUARACIAMA 31-28253
#GUARANESIA 31-28303
#GUARANI 31-28402
#GUARARA 31-28501
#GUARDA-MOR 31-28600
#GUAXUPE 31-28709
#GUIDOVAL 31-28808
#GUIMARANIA 31-28907
#GUIRICEMA 31-29004
#GURINHATA 31-29103
#HELIODORA 31-29202
#IAPU 31-29301
#IBERTIOGA 31-29400
#IBIA 31-29509
#IBIAI 31-29608
#IBIRACATU 31-29657
#IBIRACI 31-29707
#IBIRITE 31-29806
#IBITIURA DE MINAS 31-29905
#IBITURUNA 31-30002
#ICARAI DE MINAS 31-30051
#IGARAPE 31-30101
#IGARATINGA 31-30200
#IGUATAMA 31-30309
#IJACI 31-30408
#ILICINEA 31-30507
#IMBE DE MINAS 31-30556
#INCONFIDENTES 31-30606
#INDAIABIRA 31-30655
#INDIANOPOLIS 31-30705
#INGAI 31-30804
#INHAPIM 31-30903
#INHAUMA 31-31000
#INIMUTABA 31-31109
#IPABA 31-31158
#IPANEMA 31-31208
#IPATINGA 31-31307
#IPIACU 31-31406
#IPUIUNA 31-31505
#IRAI DE MINAS 31-31604
#ITABIRA 31-31703
#ITABIRINHA 31-31802
#ITABIRITO 31-31901
#ITACAMBIRA 31-32008
#ITACARAMBI 31-32107
#ITAGUARA 31-32206
#ITAIPE 31-32305
#ITAJUBA 31-32404
#ITAMARANDIBA 31-32503
#ITAMARATI DE MINAS 31-32602
#ITAMBACURI 31-32701
#ITAMBE DO MATO NAME 31-32800
#ITAMOGI 31-32909
#ITAMONTE 31-33006
#ITANHANDU 31-33105
#ITANHOMI 31-33204
#ITAOBIM 31-33303
#ITAPAGIPE 31-33402
#ITAPECERICA 31-33501
#ITAPEVA 31-33600
#ITATIAIUCU 31-33709
#ITAU DE MINAS 31-33758
#ITAUNA 31-33808
#ITAVERAVA 31-33907
#ITINGA 31-34004
#ITUETA 31-34103
#ITUIUTABA 31-34202
#ITUMIRIM 31-34301
#ITURAMA 31-34400
#ITUTINGA 31-34509
#JABOTICATUBAS 31-34608
#JACINTO 31-34707
#JACUI 31-34806
#JACUTINGA 31-34905
#JAGUARACU 31-35001
#JAIBA 31-35050
#JAMPRUCA 31-35076
#JANAUBA 31-35100
#JANUARIA 31-35209
#JANAME 31-35308
#JAPONVAR 31-35357
#JECEABA 31-35407
#JENIPAPO DE MINAS 31-35456
#JEQUERI 31-35506
#JEQUITAI 31-35605
#JEQUITIBA 31-35704
#JEQUITINHONHA 31-35803
#JESUANIA 31-35902
#JOAIMA 31-36009
#JOANESIA 31-36108
#JOAO NAME 31-36207
#JOAO NAME 31-36306
#JOAQUIM NAME 31-36405
#JORDANIA 31-36504
#JOSE NAME DE MINAS 31-36520
#JOSENOPOLIS 31-36579
#JOSE NAME 31-36553
#JUATUBA 31-36652
#JUIZ DE FORA 31-36702
#JURAMENTO 31-36801
#JURUAIA 31-36900
#JUVENILIA 31-36959
#LADAINHA 31-37007
#LAGAMAR 31-37106
#LAGOA DA PRATA 31-37205
#LAGOA DOS PATOS 31-37304
#LAGOA DOURADA 31-37403
#LAGOA FORMOSA 31-37502
#LAGOA NAME 31-37536
#LAGOA SANTA 31-37601
#LAJINHA 31-37700
#LAMBARI 31-37809
#LAMIM 31-37908
#LARANJAL 31-38005
#LASSANCE 31-38104
#LAVRAS 31-38203
#LEANDRO NAME 31-38302
#NAME DO PRADO 31-38351
#LEOPOLDINA 31-38401
#LIBERDADE 31-38500
#LIMA DUARTE 31-38609
#LIMEIRA DO OESTE 31-38625
#LONTRA 31-38658
#LUISLANDIA 31-38682
#LUISBURGO 31-38674
#LUMINARIAS 31-38708
#LUZ 31-38807
#MACHACALIS 31-38906
#MACHADO 31-39003
#MADRE DE DEUS DE MINAS 31-39102
#MALACACHETA 31-39201
#MAMONAS 31-39250
#MANGA 31-39300
#MANHUACU 31-39409
#MANHUMIRIM 31-39508
#MANTENA 31-39607
#MARAVILHAS 31-39706
#MAR DE ESPANHA 31-39805
#MARIA DA FE 31-39904
#MARIANA 31-40001
#MARILAC 31-40100
#MARIO NAME 31-40159
#MARIPA DE MINAS 31-40209
#MARLIERIA 31-40308
#MARMELOPOLIS 31-40407
#MARTINHO NAME 31-40506
#MARTINS NAME 31-40530
#MATA NAME 31-40555
#MATERLANDIA 31-40605
#MATEUS NAME 31-40704
#MATIAS NAME 31-40803
#MATIAS NAME 31-40852
#MATIPO 31-40902
#MATHIAS NAME 31-71501
#MATO NAME 31-41009
#MATOZINHOS 31-41108
#MATUTINA 31-41207
#MEDEIROS 31-41306
#MEDINA 31-41405
#NAME PIMENTEL 31-41504
#MERCES 31-41603
#MESQUITA 31-41702
#MINAS NOVAS 31-41801
#MINDURI 31-41900
#MIRABELA 31-42007
#MIRADOURO 31-42106
#MIRAI 31-42205
#MIRAVANIA 31-42254
#MOEDA 31-42304
#MOEMA 31-42403
#MONJOLOS 31-42502
#MONSENHOR NAME 31-42601
#MONTALVANIA 31-42700
#MONTE NAME DE MINAS 31-42809
#MONTE AZUL 31-42908
#MONTE BELO 31-43005
#MONTE CARMELO 31-43104
#MONTE FORMOSO 31-43153
#MONTE SANTO DE MINAS 31-43203
#MONTES NAME 31-43302
#MONTE SIAO 31-43401
#MONTEZUMA 31-43450
#MORADA NOVA DE MINAS 31-43500
#MORRO NAME 31-43609
#MORRO NAME 31-43708
#MUNHOZ 31-43807
#MURIAE 31-43906
#MUTUM 31-44003
#MUZAMBINHO 31-44102
#NACIP NAME 31-44201
#NANUQUE 31-44300
#NAQUE 31-44359
#NATALANDIA 31-44375
#NATERCIA 31-44409
#NAZARENO 31-44508
#NEPOMUCENO 31-44607
#NINHEIRA 31-44656
#NOVA BELEM 31-44672
#NOVA ERA 31-44706
#NOVA LIMA 31-44805
#NOVA MODICA 31-44904
#NOVA PONTE 31-45000
#NOVA PORTEIRINHA 31-45059
#NOVA RESENDE 31-45109
#NOVA SERRANA 31-45208
#NOVA UNIAO 31-36603
#NAME CRUZEIRO 31-45307
#NAME ORIENTE DE MINAS 31-45356
#NAMERIZONTE 31-45372
#OLARIA 31-45406
#OLHOS D AGUA 31-45455
#OLIMPIO NAME 31-45505
#OLIVEIRA 31-45604
#OLIVEIRA FORTES 31-45703
#ONCA DE PITANGUI 31-45802
#ORATORIOS 31-45851
#ORIZANIA 31-45877
#OURO NAME 31-45901
#OURO FINO 31-46008
#OURO PRETO 31-46107
#OURO NAME DE MINAS 31-46206
#PADRE CARVALHO 31-46255
#PADRE PARAISO 31-46305
#PAINEIRAS 31-46404
#PAINS 31-46503
#PAI PEDRO 31-46552
#PAIVA 31-46602
#PALMA 31-46701
#PALMOPOLIS 31-46750
#PAPAGAIOS 31-46909
#PARACATU 31-47006
#PARA DE MINAS 31-47105
#PARAGUACU 31-47204
#PARAISOPOLIS 31-47303
#PARAOPEBA 31-47402
#PASSABEM 31-47501
#PASSA QUATRO 31-47600
#PASSA TEMPO 31-47709
#PASSA-VINTE 31-47808
#PASSOS 31-47907
#PATIS 31-47956
#PATOS DE MINAS 31-48004
#PATROCINIO 31-48103
#PATROCINIO DO MURIAE 31-48202
#PAULA CANDIDO 31-48301
#PAULISTAS 31-48400
#PAVAO 31-48509
#PECANHA 31-48608
#PEDRA AZUL 31-48707
#PEDRA NAME 31-48756
#PEDRA DO ANTA 31-48806
#PEDRA DO INDAIA 31-48905
#PEDRA DOURADA 31-49002
#PEDRALVA 31-49101
#PEDRAS DE MARIA DA CRUZ 31-49150
#PEDRINOPOLIS 31-49200
#PEDRO LEOPOLDO 31-49309
#PEDRO TEIXEIRA 31-49408
#PEQUERI 31-49507
#PEQUI 31-49606
#PERDIGAO 31-49705
#PERDIZES 31-49804
#PERDOES 31-49903
#PERIQUITO 31-49952
#PESCADOR 31-50000
#PIAU 31-50109
#PIEDADE DE CARATINGA 31-50158
#PIEDADE DE PONTE NOVA 31-50208
#PIEDADE DO RIO NAME 31-50307
#PIEDADE DOS GERAIS 31-50406
#PIMENTA 31-50505
#PINGO-D AGUA 31-50539
#PINTOPOLIS 31-50570
#PIRACEMA 31-50604
#PIRAJUBA 31-50703
#PIRANGA 31-50802
#PIRANGUCU 31-50901
#PIRANGUINHO 31-51008
#PIRAPETINGA 31-51107
#PIRAPORA 31-51206
#PIRAUBA 31-51305
#PITANGUI 31-51404
#PIUMHI 31-51503
#PLANURA 31-51602
#POCO NAME 31-51701
#POCOS DE NAME 31-51800
#POCRANE 31-51909
#POMPEU 31-52006
#PONTE NOVA 31-52105
#PONTO NAME 31-52131
#PONTO NAME 31-52170
#PORTEIRINHA 31-52204
#PORTO FIRME 31-52303
#POTE 31-52402
#POUSO NAME 31-52501
#POUSO ALTO 31-52600
#PRADOS 31-52709
#PRATA 31-52808
#PRATAPOLIS 31-52907
#PRATINHA 31-53004
#PRESIDENTE NAME 31-53103
#PRESIDENTE NAME 31-53202
#PRESIDENTE NAME 31-53301
#PRESIDENTE OLEGARIO 31-53400
#PRUDENTE DE MORAIS 31-53608
#QUARTEL NAME 31-53707
#QUELUZITO 31-53806
#RAPOSOS 31-53905
#RAUL NAME 31-54002
#RECREIO 31-54101
#REDUTO 31-54150
#RESENDE COSTA 31-54200
#RESPLENDOR 31-54309
#RESSAQUINHA 31-54408
#RIACHINHO 31-54457
#RIACHO NAME 31-54507
#RIBEIRAO NAME 31-54606
#RIBEIRAO NAME 31-54705
#RIO ACIMA 31-54804
#RIO CASCA 31-54903
#RIO DOCE 31-55009
#RIO DO PRADO 31-55108
#RIO ESPERA 31-55207
#RIO MANSO 31-55306
#RIO NAME 31-55405
#RIO PARANAIBA 31-55504
#RIO PARDO DE MINAS 31-55603
#RIO PIRACICABA 31-55702
#RIO POMBA 31-55801
#RIO PRETO 31-55900
#RIO NAME 31-56007
#RITAPOLIS 31-56106
#ROCHEDO DE MINAS 31-56205
#RODEIRO 31-56304
#ROMARIA 31-56403
#ROSARIO DA LIMEIRA 31-56452
#RUBELITA 31-56502
#RUBIM 31-56601
#SABARA 31-56700
#SABINOPOLIS 31-56809
#SACRAMENTO 31-56908
#SALINAS 31-57005
#SALTO DA DIVISA 31-57104
#SANTA NAME 31-57203
#SANTA NAME DO LESTE 31-57252
#SANTA NAME DO MONTE NAME 31-57278
#SANTA NAME DO TUGURIO 31-57302
#SANTA CRUZ DE MINAS 31-57336
#SANTA CRUZ DE SALINAS 31-57377
#SANTA CRUZ DO ESCALVADO 31-57401
#SANTA EFIGENIA DE MINAS 31-57500
#SANTA FE DE MINAS 31-57609
#SANTA HELENA DE MINAS 31-57658
#SANTA NAME 31-57708
#SANTA LUZIA 31-57807
#SANTA NAME 31-57906
#SANTA MARIA DE ITABIRA 31-58003
#SANTA NAME 31-58102
#SANTA MARIA DO SUACUI 31-58201
#SANTANA NAME 31-58300
#SANTANA DE CATAGUASES 31-58409
#SANTANA DE PIRAPAMA 31-58508
#SANTANA DO DESERTO 31-58607
#SANTANA NAME 31-58706
#SANTANA NAME 31-58805
#SANTANA NAME 31-58904
#SANTANA DO PARAISO 31-58953
#SANTANA DO RIACHO 31-59001
#SANTANA DOS MONTES 31-59100
#SANTA RITA DE NAME 31-59209
#SANTA RITA DE JACUTINGA 31-59308
#SANTA RITA DE MINAS 31-59357
#SANTA RITA DO IBITIPOCA 31-59407
#SANTA RITA DO ITUETO 31-59506
#SANTA RITA DO SAPUCAI 31-59605
#SANTA ROSA DA SERRA 31-59704
#SANTA VITORIA 31-59803
#SANTO NAME DO AMPARO 31-59902
#SANTO NAME DO AVENTUREIRO 31-60009
#SANTO NAME DO GRAMA 31-60108
#SANTO NAME DO ITAMBE 31-60207
#SANTO NAME DO JACINTO 31-60306
#SANTO NAME DO MONTE 31-60405
#SANTO NAME DO RETIRO 31-60454
#SANTO NAME DO RIO ABAIXO 31-60504
#SANTO NAME 31-60603
#SANTOS NAME 31-60702
#SAO BENTO ABADE 31-60801
#SAO BRAS DO SUACUI 31-60900
#SAO DOMINGOS DAS DORES 31-60959
#SAO DOMINGOS DO PRATA 31-61007
#SAO FELIX DE MINAS 31-61056
#SAO FRANCISCO 31-61106
#SAO FRANCISCO DE PAULA 31-61205
#SAO FRANCISCO DE SALES 31-61304
#SAO FRANCISCO DO GLORIA 31-61403
#SAO NAMEDO 31-61502
#SAO NAMEDO DA PIEDADE 31-61601
#SAO NAMEDO DO BAIXIO 31-61650
#SAO GONCALO DO ABAETE 31-61700
#SAO GONCALO DO PARA 31-61809
#SAO GONCALO DO RIO ABAIXO 31-61908
#SAO GONCALO DO SAPUCAI 31-62005
#SAO GONCALO DO RIO PRETO 31-25507
#SAO GOTARDO 31-62104
#SAO JOAO BATISTA DO GLORIA 31-62203
#SAO JOAO DA LAGOA 31-62252
#SAO JOAO DA MATA 31-62302
#SAO JOAO DA PONTE 31-62401
#SAO JOAO DAS MISSOES 31-62450
#SAO JOAO DEL REI 31-62500
#SAO JOAO NAME 31-62559
#SAO JOAO DO MANTENINHA 31-62575
#SAO JOAO DO ORIENTE 31-62609
#SAO JOAO DO PACUI 31-62658
#SAO JOAO DO PARAISO 31-62708
#SAO JOAO EVANGELISTA 31-62807
#SAO JOAO NEPOMUCENO 31-62906
#SAO JOAQUIM DE BICAS 31-62922
#SAO JOSE DA BARRA 31-62948
#SAO JOSE DA LAPA 31-62955
#SAO JOSE DA SAFIRA 31-63003
#SAO JOSE DA VARGINHA 31-63102
#SAO JOSE DO NAME 31-63201
#SAO JOSE DO DIVINO 31-63300
#SAO JOSE DO GOIABAL 31-63409
#SAO JOSE DO JACURI 31-63508
#SAO JOSE DO MANTIMENTO 31-63607
#SAO LOURENCO 31-63706
#SAO MIGUEL DO ANTA 31-63805
#SAO PEDRO DA UNIAO 31-63904
#SAO PEDRO DOS FERROS 31-64001
#SAO PEDRO DO SUACUI 31-64100
#SAO ROMAO 31-64209
#SAO ROQUE DE MINAS 31-64308
#SAO SEBASTIAO DA BELA VISTA 31-64407
#SAO SEBASTIAO NAME NAME 31-64431
#SAO SEBASTIAO DO ANTA 31-64472
#SAO SEBASTIAO DO MARANHAO 31-64506
#SAO SEBASTIAO DO OESTE 31-64605
#SAO SEBASTIAO DO PARAISO 31-64704
#SAO SEBASTIAO DO RIO PRETO 31-64803
#SAO SEBASTIAO DO RIO NAME 31-64902
#SAO THOME DAS LETRAS 31-65206
#SAO TIAGO 31-65008
#SAO TOMAS DE AQUINO 31-65107
#SAO VICENTE DE MINAS 31-65305
#SAPUCAI-MIRIM 31-65404
#SARDOA 31-65503
#SARZEDO 31-65537
#SETUBINHA 31-65552
#SEM-PEIXE 31-65560
#SENADOR AMARAL 31-65578
#SENADOR NAMETES 31-65602
#SENADOR NAME 31-65701
#SENADOR NAME 31-65800
#SENADOR MODESTINO NAME 31-65909
#SENHORA DE OLIVEIRA 31-66006
#SENHORA DO PORTO 31-66105
#SENHORA DOS REMEDIOS 31-66204
#SERICITA 31-66303
#SERITINGA 31-66402
#SERRA AZUL DE MINAS 31-66501
#SERRA DA SAUDADE 31-66600
#SERRA DOS AIMORES 31-66709
#SERRA NAME 31-66808
#SERRANIA 31-66907
#SERRANOPOLIS DE MINAS 31-66956
#SERRANOS 31-67004
#SERRO 31-67103
#SETE LAGOAS 31-67202
#SILVEIRANIA 31-67301
#SILVIANOPOLIS 31-67400
#SIMAO NAME 31-67509
#SIMONESIA 31-67608
#SOBRALIA 31-67707
#SOLEDADE DE MINAS 31-67806
#TABULEIRO 31-67905
#TAIOBEIRAS 31-68002
#TAPARUBA 31-68051
#TAPIRA 31-68101
#TAPIRAI 31-68200
#TAQUARACU DE MINAS 31-68309
#TARUMIRIM 31-68408
#TEIXEIRAS 31-68507
#TEOFILO NAME 31-68606
#TIMOTEO 31-68705
#TIRADENTES 31-68804
#TIROS 31-68903
#TOCANTINS 31-69000
#TOCOS DO MOJI 31-69059
#TOLEDO 31-69109
#TOMBOS 31-69208
#TRES NAMEACOES 31-69307
#TRES MARIAS 31-69356
#TRES PONTAS 31-69406
#TUMIRITINGA 31-69505
#TUPACIGUARA 31-69604
#TURMALINA 31-69703
#TURVOLANDIA 31-69802
#UBA 31-69901
#UBAI 31-70008
#UBAPORANGA 31-70057
#UBERABA 31-70107
#UBERLANDIA 31-70206
#UMBURATIBA 31-70305
#UNAI 31-70404
#UNIAO DE MINAS 31-70438
#URUANA DE MINAS 31-70479
#URUCANIA 31-70503
#URUCUIA 31-70529
#NAME 31-70578
#VARGEM NAME 31-70602
#VARGEM NAME DO RIO PARDO 31-70651
#VARGINHA 31-70701
#VARJAO DE MINAS 31-70750
#VARZEA NAME 31-70800
#VARZELANDIA 31-70909
#VAZANTE 31-71006
#NAMELANDIA 31-71030
#VEREDINHA 31-71071
#VERISSIMO 31-71105
#NAME NAME 31-71154
#VESPASIANO 31-71204
#VICOSA 31-71303
#VIEIRAS 31-71402
#VIRGEM DA LAPA 31-71600
#VIRGINIA 31-71709
#VIRGINOPOLIS 31-71808
#VIRGOLANDIA 31-71907
#VISCONDE DO RIO NAME 31-72004
#VOLTA NAME 31-72103
|
# # 10.1
#
# tens_list = [10, 20, 30, 40]
# funny_strings = ['crunchy frog', 'ram bladder', 'lark vomit']
# some_other = ['spam', 2.0, 5, [10, 20]]
#
# empty_list = []
#
# print(tens_list)
# print(funny_strings)
# print(some_other)
# print(empty_list)
# print()
# print(tens_list[1:])
# print(funny_strings[1])
# print(some_other[::2])
# # 10.2
# food_birds = ['chicken', 'turkey']
#
# food_birds[0] = 'seagull'
# print(food_birds)
# 10.3
# cheeses = ['cheddar', 'gouda', 'munster']
#
# for cheese in cheeses:
# print(cheese)
#
# print()
#
# numbers = [1, 2, 3, 4, 5]
#
# for i in range(len(numbers)):
# numbers[i] = numbers[i] * 2
#
# print(numbers)
#
# print()
#
# for x in []:
# print("this won't execute")
#
# a = [1, 2, 3]
# b = [4, 5, 6]
# c = a + b
# print(c)
#
# 10.7
# def add_all(t):
# total = 0
# for x in t:
# total += x
# return total
#
# print("Result of add all function is {0}".format(add_all([1, 2, 3])))
# exercise 1
# def nested_sum(list_of_lists):
#     # Add up every element across all of the nested lists and return the total.
#     total = 0
#     for inner in list_of_lists:
#         total += sum(inner)
#     return total
#
# print(nested_sum([[1, 1, 1], [2, 2, 2], [3, 3, 3]]))  # 18
#
# def capitalize_all(t):
# res = []
# for s in t:
# res.append(s.capitalize())
# return res
#
# print(capitalize_all(["ahoy there."]))
# """
# Exercise 3
# Write a function that takes a list of numbers and returns the cumulative sum; that is, a new list where the ith element is the sum of the first i+1 elements from the original list. For example, the cumulative sum of [1, 2, 3] is [1, 3, 6].
# """
#
# list_to_update = [1, 2, 3, 4, 5, 6, 7, 8, 9]
#
# def cumulativer(nums):
# sums = 0
# cums_list = []
# for i in nums:
# sums += i
# cums_list.append(sums)
# return cums_list
#
# print(cumulativer(list_to_update))
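# As an aside (not part of the book's exercise): itertools.accumulate in the
# standard library produces the same running sums.
#
# from itertools import accumulate
# print(list(accumulate(list_to_update)))  # [1, 3, 6, 10, 15, 21, 28, 36, 45]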
# 10.8
# t = ['a', 'b', 'c']
# x = t.pop(1)
# print(t)
# x = t.pop()
# print(t)
# print(x)
# """Exercise 4
# Write a function called middle that takes a list and returns a new list that contains all but the first and last elements. So middle([1,2,3,4]) should return [2,3]."""
#
#
# def middle(seq):
#     # Return a copy containing all but the first and last elements.
#     return seq[1:-1]
#
# print(middle("Oklahoma"))
# print(middle("Michelle NAME print(middle("Beyonce NAME """Exercise 5
# Write a function called chop that takes a list, modifies it by removing the first and last elements, and returns None."""
#
# def chop(lst):
#     # Remove the first and last elements in place; return None per the exercise.
#     del lst[0]
#     del lst[-1]
#     return None
#
# fruits = ["Apple", "Banana", "Canteloupe"]
# chop(fruits)
# print(fruits)  # ['Banana']
|
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl   fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl   fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by NAME. All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
|
#originally by NAME 25Apr.2016
#hy:Changes by NAME 21Dec.2016 v0.45
#sudo apt-get install python-h5py
# Added evaluation function for multiple models, their result file names contain calculated mAP.
# Added functionality to set different dropout rate for each layer for 3conv net
# Moved auxiliary functions to a new file tools.py
# Added function to obtain images of estimated receptive fields/active fields
# Added function to save all models and specified names according to training status
# Added graph 3conv, 4conv
# Added real batch training functionality
# Added functionality of feeding a tensor name
# Added function to save tensorflow models with max precision for a class, not overwritten by following data
# Added function do_crop2_parts to get parts in different sizes
# Added function for displaying evaluation results in a worksheet (result_for_table = 0).
# Added similarity.py to analyse similarity between classes, CAD samples and camera test images
# Created tensor_cnn_evaluate.py. It is used for testing multiple models. Inputs of each evaluation function include:
# session, num_class, img_list, _labels
# Added stop condition to avoid overfitting
# Added function to load two models of different graphs. requirement: install tensorflow version > 0.8, numpy > 1.11.2
# Added display of all defined results for training, validation and test in one graph in tensorboard
# Added optimizer Adam and its parameters
# Added display of test result in RETRAIN
# Added a function to add more training data during a training. This data contains random noise.
# Added display of test result in CONTINUE_TRAIN. Some new variables are created for tensorflow for this purpose.
# Created a function for importing data, import_data(). This is used for displaying test result parallel to validation result.
# Added function to evaluate two models of same graph
# Added adaptive testing - evaluate_image_vague, create_test_slices to get top, bottom, left, right, center parts of a test image
# Added formula for calculating window size when webcam is used, also for rectangular form
# Added functions: random crop, random rotation, set scale, remove small object area
# Added def convert_result for converting sub-class to main-class result.
# Changed tensorboard backup path and added sub-folder to store tensorboard logs so that the logs can be compared easily.
# Changed model name to include specification info of a model.
# Specification information of a model, such as the number of hidden layers and tensor size, must be set to the same values when the model is reused later.
# Added functionality of continuing a broken training
# Added distortion tools for automatically generating and moving/removing data
# Added tensorboard log timestamp for comparing different model in live time, changed tensorboard log path
# Added function to do tracking in terms of shift mean
# Added date time for log
# Training set: CAD samples for all six classes
# Added functionality of saving first convolutional layer feature output in training phase and test phase
# Added function to evaluate model with webcam
# Prepare_list is activated according to the action selected (training or test)
# Test set: lego positive samples for all six classes
# Added output info: when evaluating with images, proportion of correctly classified is included
# Added sequence configurations based on whether training or test is selected
# Added function to save correctly classified images/frames
# Added function to save misclassified images to folder ./MisClassifed; an upper limit can be set
# Added log function, time count for training duration
# Test_Images: stored under ./Test_Images, they are lego positive samples that are not included in training set.
# Added the functionality to evaluate model with images
# Changed prepare_list to a global function to make test run smoothly.
# Changed condition for label, predict
# Changed display precision of matrix outputs to 2
# Added a formula to calculate shape, in settings.py
# Added a formula to set cropped frame to show ROI in demo
# Tested video_crop_tool.py, it does not require strict parameter for width as in this script
# Added global variables for width, height, crop sizes, defined in settings.py
# Changed places to adapt to lego data
# - All file paths in tensor_cnn_video.py, prepare_list.py, prep_images.py, test.py
# - LABELS(=6), which is the number of sub-folders under ./Data
# To see the tensorboard output, use the following command
# $ tensorboard --logdir='enter_the_path_of_tensorboard_log'
#####################################################################################################
|
"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
================ =================================================================
``pi`` Pi
``golden`` Golden ratio
``golden_ratio`` Golden ratio
================ =================================================================
Physical constants
==================
=========================== =================================================================
``c`` speed of light in vacuum
``speed_of_light`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``Planck`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``gravitational_constant`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``elementary_charge`` elementary charge
``R`` molar gas constant
``gas_constant`` molar gas constant
``alpha`` fine-structure constant
``fine_structure`` fine-structure constant
``N_A`` Avogadro constant
``Avogadro`` Avogadro constant
``k`` Boltzmann constant
``Boltzmann`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``electron_mass`` electron mass
``m_p`` proton mass
``proton_mass`` proton mass
``m_n`` neutron mass
``neutron_mass`` neutron mass
=========================== =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2014 CODATA recommended values [CODATA2014]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb``            one pound (avoirdupois) in kg
``pound``         one pound (avoirdupois) in kg
``oz`` one ounce in kg
``ounce`` one ounce in kg
``stone`` one stone in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
``u`` atomic mass constant (in kg)
``atomic_mass`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcminute`` arc minute in radians
``arcsec`` arc second in radians
``arcsecond`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
===================== ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``point`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``astronomical_unit`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
===================== ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``atmosphere`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``mmHg`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``litre`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_US`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
``barrel`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================== ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================== ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
convert_temperature
C2K
K2C
F2C
C2F
F2K
K2F
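For example, converting between scales with the helper (output approximate):
>>> from scipy.constants import convert_temperature
>>> convert_temperature(100.0, 'Celsius', 'Kelvin')   # doctest: +SKIP
373.15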
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``electron_volt`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_th`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
``horsepower`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``dyne`` one dyne in newtons
``lbf`` one pound force in newtons
``pound_force`` one pound force in newtons
``kgf`` one kilogram force in newtons
``kilogram_force`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
References
==========
.. [CODATA2014] CODATA Recommended Values of the Fundamental
Physical Constants 2014.
http://physics.nist.gov/cuu/Constants/index.html
""" |
"""
=============================
Byteswapping and byte order
=============================
Introduction to byte ordering and ndarrays
==========================================
The ``ndarray`` is an object that provides a Python array interface to data
in memory.
It often happens that the memory that you want to view with an array is
not of the same byte ordering as the computer on which you are running
Python.
For example, I might be working on a computer with a little-endian CPU -
such as an Intel Pentium - but I have loaded some data from a file
written by a computer that is big-endian. Let's say I have loaded 4
bytes from a file written by a Sun (big-endian) computer. I know that
these 4 bytes represent two 16-bit integers. On a big-endian machine, a
two-byte integer is stored with the Most Significant Byte (MSB) first,
and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
#. MSB integer 1
#. LSB integer 1
#. MSB integer 2
#. LSB integer 2
Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
>>> big_end_str = bytes([0, 1, 3, 2])
>>> big_end_str
b'\\x00\\x01\\x03\\x02'
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
770
Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
example, if our data represented a single unsigned 4-byte little-endian
integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
Returning to our ``big_end_arr`` - in this case our underlying data is
big-endian (data endianness) and we've set the dtype to match (the dtype
is also big-endian). However, sometimes you need to flip these around.
.. warning::
Scalars currently do not include byte order information, so extracting
a scalar from an array will return an integer in native byte order.
Hence:
>>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
True
Changing byte ordering
======================
As you can imagine from the introduction, there are two ways you can
affect the relationship between the byte ordering of the array and the
underlying memory it is looking at:
* Change the byte-ordering information in the array dtype so that it
interprets the underlying data as being in a different byte order.
This is the role of ``arr.newbyteorder()``
* Change the byte-ordering of the underlying data, leaving the dtype
interpretation as it was. This is what ``arr.byteswap()`` does.
The common situations in which you need to change byte ordering are:
#. Your data and dtype endianness don't match, and you want to change
the dtype so that it matches the data.
#. Your data and dtype endianness don't match, and you want to swap the
data so that they match the dtype.
#. Your data and dtype endianness match, but you want the data swapped
and the dtype to reflect this.
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------
We make something where they don't match:
>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256
The obvious fix for this situation is to change the dtype so it gives
the correct endianness:
>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1
Note the array has not changed in memory:
>>> fixed_end_dtype_arr.tobytes() == big_end_str
True
Data and dtype endianness don't match, change data to match dtype
------------------------------------------------------------------
You might want to do this if you need the data in memory to be a certain
ordering. For example you might be writing the memory out to a file
that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1
Now the array *has* changed in memory:
>>> fixed_end_mem_arr.tobytes() == big_end_str
False
Data and dtype endianness match, swap data and dtype
----------------------------------------------------
You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense. In this case you just do both of
the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
An easier way of casting the data to a specific dtype and byte ordering
can be achieved with the ndarray astype method:
>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
""" |
"""
A multi-dimensional ``Vector`` class, take 4
A ``Vector`` is built from an iterable of numbers::
>>> Vector([3.1, 4.2])
Vector([3.1, 4.2])
>>> Vector((3, 4, 5))
Vector([3.0, 4.0, 5.0])
>>> Vector(range(10))
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
Tests with 2-dimensions (same results as ``vector2d_v1.py``)::
>>> v1 = Vector([3, 4])
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector([3.0, 4.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'd\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector([0, 0]))
(True, False)
Test of ``.frombytes()`` class method:
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0])
>>> v1 == v1_clone
True
Tests with 3-dimensions::
>>> v1 = Vector([3, 4, 5])
>>> x, y, z = v1
>>> x, y, z
(3.0, 4.0, 5.0)
>>> v1
Vector([3.0, 4.0, 5.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0, 5.0)
>>> abs(v1) # doctest:+ELLIPSIS
7.071067811...
>>> bool(v1), bool(Vector([0, 0, 0]))
(True, False)
Tests with many dimensions::
>>> v7 = Vector(range(7))
>>> v7
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
>>> abs(v7) # doctest:+ELLIPSIS
9.53939201...
Test of ``.__bytes__`` and ``.frombytes()`` methods::
>>> v1 = Vector([3, 4, 5])
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0, 5.0])
>>> v1 == v1_clone
True
Tests of sequence behavior::
>>> v1 = Vector([3, 4, 5])
>>> len(v1)
3
>>> v1[0], v1[len(v1)-1], v1[-1]
(3.0, 5.0, 5.0)
Test of slicing::
>>> v7 = Vector(range(7))
>>> v7[-1]
6.0
>>> v7[1:4]
Vector([1.0, 2.0, 3.0])
>>> v7[-1:]
Vector([6.0])
>>> v7[1,2]
Traceback (most recent call last):
...
TypeError: Vector indices must be integers
Tests of dynamic attribute access::
>>> v7 = Vector(range(10))
>>> v7.x
0.0
>>> v7.y, v7.z, v7.t
(1.0, 2.0, 3.0)
Dynamic attribute lookup failures::
>>> v7.k
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'k'
>>> v3 = Vector(range(3))
>>> v3.t
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 't'
>>> v3.spam
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'spam'
Tests of hashing::
>>> v1 = Vector([3, 4])
>>> v2 = Vector([3.1, 4.2])
>>> v3 = Vector([3, 4, 5])
>>> v6 = Vector(range(6))
>>> hash(v1), hash(v3), hash(v6)
(7, 2, 1)
Most hash values of non-integers vary from a 32-bit to 64-bit CPython build::
>>> import sys
>>> hash(v2) == (384307168202284039 if sys.maxsize > 2**32 else 357915986)
True
""" |
"""
This page is in the table of contents.
The gear script can generate a spur gear couple, a bevel gear couple, a ring gear couple and a rack & pinion couple.
A helix pattern can be added to each gear type. All the gear types have a clearance and all the teeth can be beveled. A keyway, shaft and lightening holes can be added to all the round gears, and rack holes can be added to the rack. The script can output solid gears or only the gear profiles. Both gears of the couple can be generated or just one.
The couple has a pinion gear and a complement.
==Examples==
The link text includes the distinguishing parameters. Each svg page was generated from an xml page of the same root name using carve. For example, gear.svg was generated by clicking 'Carve' on the carve tool panel and choosing gear.xml in the file chooser.
Each generated svg file has the xml fabmetheus element, without comments, towards the end of the file. To see it, open the svg file in a text editor and search for 'fabmetheus'. If you copy that into a new text document, add the line '<?xml version='1.0' ?>' at the beginning, and give it a file name with the extension '.xml', you can then generate another svg file using carve.
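For example, a minimal xml page of this form would look like the following sketch (the fabmetheus root element follows the description above, and the shaft radius value is illustrative):
<?xml version='1.0' ?>
<fabmetheus>
<gear shaftRadius='5' />
</fabmetheus>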
===Bevel===
Bevel gear couple.
<a href='../models/xml_models/creation/gear/bevel.svg'>gear operatingAngle=90</a>
===Collar===
Spur gear couple and each gear has a collar.
<a href='../models/xml_models/creation/gear/collar.svg'>gear complementCollarLengthOverFaceWidth='1' pinionCollarLengthOverFaceWidth='1' shaftRadius='5'</a>
===Gear===
Default spur gear with no parameters.
<a href='../models/xml_models/creation/gear/gear.svg'>gear</a>
===Keyway===
Spur gear couple and each gear has a collar and defined keyway.
<a href='../models/xml_models/creation/gear/keyway.svg'>gear complementCollarLengthOverFaceWidth='1' keywayRadius='2' pinionCollarLengthOverFaceWidth='1' shaftRadius='5'</a>
===Rack===
Rack and pinion couple.
<a href='../models/xml_models/creation/gear/rack.svg'>gear teethComplement='0'</a>
===Rack Hole===
Rack and pinion couple, with holes in the rack.
<a href='../models/xml_models/creation/gear/rack_hole.svg'>gear rackHoleRadiusOverWidth='0.2' rackWidthOverFaceWidth='2' teethComplement='0'</a>
===Ring===
Pinion and ring gear.
<a href='../models/xml_models/creation/gear/ring.svg'>gear teethComplement='-23'</a>
===Shaft===
Spur gear couple and each gear has a square shaft hole.
<a href='../models/xml_models/creation/gear/shaft.svg'>gear shaftRadius='5'</a>
===Shaft Top===
Spur gear couple and each gear has a round shaft hole, truncated on top.
<a href='../models/xml_models/creation/gear/shaft_top.svg'>gear shaftRadius='5' shaftSides='13' shaftDepthTop='2'</a>
===Spur Helix===
Spur gear couple with the gear teeth following a helix path.
<a href='../models/xml_models/creation/gear/spur_helix.svg'>gear helixAngle='45'</a>
===Spur Herringbone===
Spur gear couple with the gear teeth following a herringbone path.
<a href='../models/xml_models/creation/gear/spur_herringbone.svg'>gear helixAngle='45' helixType='herringbone'</a>
===Spur Parabolic===
Spur gear couple with the gear teeth following a parabolic path.
<a href='../models/xml_models/creation/gear/spur_parabolic.svg'>gear helixAngle='45' helixType='parabolic'</a>
===Spur Profile===
Spur gear couple profile. Since this is just a horizontal path, it cannot be sliced, so the path is then extruded to create a solid which can be sliced and viewed.
<a href='../models/xml_models/creation/gear/spur_profile.svg'>gear id='spurProfile' faceWidth='0' | extrude target='=document.getElementByID(spurProfile)</a>
==Parameters==
===Center Distance===
Default is such that the pitch radius works out to twenty.
Defines the distance between the gear centers.
===Clearance Couplet===
====Clearance Over Wavelength====
Default is 0.1.
Defines the ratio of the clearance over the wavelength of the gear profile. The wavelength is the arc distance between the gear teeth.
====Clearance====
Default is the 'Clearance Over Wavelength' times the wavelength.
Defines the clearance between the gear tooth and the other gear of the couple. If the clearance is zero, the outside of the gear tooth will touch the other gear. If the clearance is too high, the gear teeth will be long and weak.
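Every couplet in this document resolves the same way: the explicit parameter wins, otherwise the ratio times its base value is used. A sketch of that rule (hypothetical helper, not part of the gear script):

    def resolve_couplet(explicit_value, ratio, base):
        # the explicit value wins; otherwise default to ratio * base
        return explicit_value if explicit_value is not None else ratio * base

    wavelength = 6.28  # arc distance between teeth, for illustration
    clearance = resolve_couplet(None, 0.1, wavelength)  # -> 0.628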
===Collar Addendum Couplet===
====Collar Addendum Over Radius====
Default is one.
Defines the ratio of the collar addendum over the shaft radius.
====Collar Addendum====
Default is the 'Collar Addendum Over Radius' times the shaft radius.
Defines the collar addendum.
===Complement Collar Length Couplet===
====Complement Collar Length Over Face Width====
Default is zero.
Defines the ratio of the complement collar length over the face width.
====Complement Collar Length====
Default is the 'Complement Collar Length Over Face Width' times the face width.
Defines the complement collar length. If the complement collar length is zero, there will not be a collar on the complement gear.
===Creation Type===
Default is 'both'.
====Both====
When selected, the pinion and complement will be generated.
====Complement====
When selected, only the complement gear or rack will be generated.
====Pinion====
When selected, only the pinion will be generated.
===Face Width===
Default is ten.
Defines the face width.
===Gear Hole Paths===
Default is empty.
Defines the centers of the gear holes. If the gear hole paths parameter is the default empty, then the centers of the gear holes will be generated from other parameters.
===Helix Angle===
Default is zero.
Defines the helix angle of the gear.
===Helix Path===
Default is empty.
Defines the helix path of the gear teeth. If the helix path is the default empty, then the helix will be generated from the helix angle and helix type.
===Helix Type===
Default is 'basic'.
====Basic====
When selected, the helix will be basic.
====Herringbone====
When selected, the helix will have a herringbone pattern.
====Parabolic====
When selected, the helix will have a parabolic pattern.
===Keyway Radius Couplet===
====Keyway Radius Over Radius====
Default is half.
Defines the ratio of the keyway radius over the shaft radius.
====Keyway Radius====
Default is the 'Keyway Radius Over Radius' times the shaft radius.
Defines the keyway radius. If the keyway radius is zero, there will not be a keyway on the collar.
===Lightening Hole Margin Couplet===
====Lightening Hole Margin Over Rim Dedendum====
Default is one.
Defines the ratio of the lightening hole margin over the rim dedendum.
====Lightening Hole Margin====
Default is the 'Lightening Hole Margin Over Rim Dedendum' times the rim dedendum.
Defines the minimum margin between lightening holes.
===Lightening Hole Minimum Radius===
Default is one.
Defines the minimum radius of the lightening holes.
===Move Type===
Default is 'separate'.
====None====
When selected, the gears will not be moved and will therefore overlap. Afterwards the write plugin could be used to write each gear to a different file, so they can be fabricated in separate operations.
====Mesh====
When selected, the gears will be separated horizontally so that they just mesh. This is useful to test if the gears mesh properly.
====Separate====
When selected, the gears will be separated horizontally with a gap between them.
====Vertical====
When selected, the gears will be separated vertically.
===Operating Angle===
Default is 180 degrees.
Defines the operating angle between the gear axes. If the operating angle is not 180 degrees, a bevel gear couple will be generated.
===Pinion Collar Length Couplet===
====Pinion Collar Length Over Face Width====
Default is zero.
Defines the ratio of the pinion collar length over the face width.
====Pinion Collar Length====
Default is the 'Pinion Collar Length Over Face Width' times the face width.
Defines the pinion collar length. If the pinion collar length is zero, there will not be a collar on the pinion gear.
===Pitch Radius===
Default is twenty if the pitch radius has not been set. If the center distance is set, the default pitch radius is the center distance times the number of pinion teeth divided by the total number of gear teeth.
Defines the pinion pitch radius.
===Plate Clearance Couplet===
====Plate Clearance Over Length====
Default is 0.2.
Defines the ratio of the plate clearance over the plate length.
====Plate Clearance====
Default is the 'Plate Clearance Over Length' times the plate length.
Defines the clearance between the pinion and the plate of the ring gear. If the clearance is zero, they will touch.
===Plate Length Couplet===
====Plate Length Over Face Width====
Default is half.
Defines the ratio of the plate length over the face width.
====Plate Length====
Default is the 'Plate Length Over Face Width' times the face width.
Defines the length of the plate of the ring gear.
===Pressure Angle===
Default is twenty degrees.
Defines the pressure angle of the gear couple.
===Profile Surfaces===
Default is eleven.
Defines the number of profile surfaces.
===Rack Hole Below Over Width Couplet===
====Rack Hole Below Over Width====
Default is 0.6.
Defines the ratio of the distance below the pitch of the rack holes over the rack width.
====Rack Hole Below====
Default is the 'Rack Hole Below Over Width' times the rack width.
Defines the distance below the pitch of the rack holes.
===Rack Hole Radius Couplet===
====Rack Hole Radius Over Width====
Default is zero.
Defines the ratio of the rack hole radius over the rack width.
====Rack Hole Radius====
Default is the 'Rack Hole Radius Over Width' times the rack width.
Defines the radius of the rack holes. If the rack hole radius is zero, there won't be any rack holes.
===Rack Hole Step Over Width Couplet===
====Rack Hole Step Over Width====
Default is one.
Defines the ratio of the rack hole step over the rack width.
====Rack Hole Step====
Default is the 'Rack Hole Step Over Width' times the rack width.
Defines the horizontal step distance between the rack holes.
===Rack Length Over Radius Couplet===
====Rack Length Over Radius====
Default is two times pi.
Defines the ratio of the rack length over the pitch radius.
====Rack Length====
Default is the 'Rack Length Over Radius' times the pitch radius.
Defines the rack length.
===Rack Width Couplet===
====Rack Width Over Face Width====
Default is one.
Defines the ratio of the rack width over the face width.
====Rack Width====
Default is the 'Rack Width Over Face Width' times the face width.
Defines the rack width.
===Rim Dedendum Couplet===
====Rim Dedendum Over Radius====
Default is 0.2.
Defines the ratio of the rim dedendum over the pitch radius.
====Rim Dedendum====
Default is the 'Rim Dedendum Over Radius' times the pitch radius.
Defines the rim dedendum of the gear.
===Root Bevel Couplet===
====Root Bevel Over Clearance====
Default is half.
Defines the ratio of the root bevel over the clearance.
====Root Bevel====
Default is the 'Root Bevel Over Clearance' times the clearance.
Defines the bevel at the root of the gear tooth.
===Shaft Depth Bottom Couplet===
====Shaft Depth Bottom Over Radius====
Default is zero.
Defines the ratio of the bottom shaft depth over the shaft radius.
====Shaft Depth Bottom====
Default is the 'Shaft Depth Bottom Over Radius' times the shaft radius.
Defines the bottom shaft depth.
===Shaft Depth Top Couplet===
====Shaft Depth Top Over Radius====
Default is zero.
Defines the ratio of the top shaft depth over the shaft radius.
====Shaft Depth Top====
Default is the 'Shaft Depth Top Over Radius' times the shaft radius.
Defines the top shaft depth.
===Shaft Path===
Default is empty.
Defines the path of the shaft hole. If the shaft path is the default empty, then the shaft path will be generated from the shaft depth bottom, shaft depth top, shaft radius and shaft sides.
===Shaft Radius Couplet===
====Shaft Radius Over Pitch Radius====
Default is zero.
Defines the ratio of the shaft radius over the pitch radius.
====Shaft Radius====
Default is the 'Shaft Radius Over Pitch Radius' times the pitch radius.
Defines the shaft radius. If the shaft radius is zero there will not be a shaft hole.
===Shaft Sides===
Default is four.
Defines the number of shaft sides.
===Teeth Pinion===
Default is seven.
Defines the number of teeth in the pinion.
===Teeth Complement===
Default is seventeen.
Defines the number of teeth in the complement of the gear couple. If the number of teeth is positive, the gear couple will be a spur or bevel type. If the number of teeth is zero, the gear couple will be a rack and pinion. If the number of teeth is negative, the gear couple will be a spur and ring.
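The sign rule above can be sketched as a small helper (hypothetical, not part of the script):

    def couple_type(teeth_complement):
        # the sign of the complement tooth count selects the couple type
        if teeth_complement > 0:
            return 'spur or bevel'
        if teeth_complement == 0:
            return 'rack and pinion'
        return 'spur and ring'   # negative: ring gear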
===Tip Bevel Couplet===
====Tip Bevel Over Clearance====
Default is 0.1.
Defines the ratio of the tip bevel over the clearance.
====Tip Bevel====
Default is the 'Tip Bevel Over Clearance' times the clearance.
Defines the bevel at the tip of the gear tooth.
===Tooth Thickness Multiplier===
Default is 0.99999.
Defines the amount by which the thickness of the tooth will be multiplied. If the gears mesh too tightly when produced, you can reduce the tooth thickness multiplier so that they mesh with reasonable tightness.
""" |
"""
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in our lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
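A sketch of a typical rk4 call (assuming the signature rk4(derivs, y0, t),
with derivs(y, t) returning dy/dt; check the function's own docstring)::

    import numpy as np

    def derivs(y, t):
        # exponential decay: dy/dt = -y
        return -y

    t = np.linspace(0.0, 5.0, 101)
    y = rk4(derivs, 1.0, t)   # y[i] approximates exp(-t[i])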
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
""" |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# SKR03
# =====
# This module provides a German chart of accounts based on the SKR03.
# Under the current settings, the company is not liable for VAT.
# This basic setting is very easy to change and as a rule requires
# an initial assignment of tax accounts to products and/or
# G/L accounts or to partners.
# The sales taxes (full rate, reduced rate and tax-free)
# should be stored with the product master data (depending on the
# tax regulations). The assignment is made on the Accounting tab
# (category: sales tax).
# The input taxes (full rate, reduced rate and tax-free)
# should likewise be stored with the product master data (depending
# on the tax regulations). The assignment is made on the Accounting
# tab (category: input tax).
# The assignment of the taxes for imports from and exports to EU countries,
# as well as for purchases from and sales to third countries, should be
# stored with the partner (supplier/customer), depending on the country
# of origin of the supplier/customer. The assignment at the customer ranks
# higher than the assignment at the products and overrides it in
# individual cases.
#
# To simplify tax reporting and posting for foreign transactions,
# OpenERP allows a general mapping of tax codes and tax accounts
# (e.g. mapping "Umsatzsteuer 19%" (19% sales tax) to "steuerfreie
# Einfuhren aus der EU" (tax-free imports from the EU)) so that this
# mapping can be assigned to the foreign partner (customer/supplier).
# Posting a purchase invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the input tax base amount (e.g. input tax base
# amount, full rate 19%).
# The tax amount appears under the category "input taxes" (e.g. input
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated and then output in the form of a report.
#
# Posting a sales invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the sales tax base amount
# (e.g. sales tax base amount, full rate 19%).
# The tax amount appears under the category "sales tax"
# (e.g. sales tax 19%). Multidimensional hierarchies allow
# different positions to be aggregated.
# The assigned tax codes can be reviewed at the level of each individual
# invoice (incoming and outgoing) and adjusted there if necessary.
# Invoice credit notes lead to a correction (counter-entry) of the tax
# posting, in the form of a mirror-image posting.
# SKR04
# =====
# This module provides a German chart of accounts based on the SKR04.
# Under the current settings, the company is not liable for VAT, i.e.
# by default there is no assignment of products and G/L accounts to
# tax keys.
# This basic setting is very easy to change and as a rule requires
# an initial assignment of tax keys to products and/or
# G/L accounts or to partners.
# The sales taxes (full rate, reduced rate and tax-free)
# should be stored with the product master data (depending on the
# tax regulations). The assignment is made on the Accounting tab
# (category: sales tax).
# The input taxes (full rate, reduced rate and tax-free)
# should likewise be stored with the product master data (depending
# on the tax regulations). The assignment is made on the Accounting
# tab (category: input tax).
# The assignment of the taxes for imports from and exports to EU countries,
# as well as for purchases from and sales to third countries, should be
# stored with the partner (supplier/customer), depending on the country
# of origin of the supplier/customer. The assignment at the customer ranks
# higher than the assignment at the products and overrides it in
# individual cases.
#
# To simplify tax reporting and posting for foreign transactions,
# OpenERP allows a general mapping of tax codes and tax accounts
# (e.g. mapping "Umsatzsteuer 19%" (19% sales tax) to "steuerfreie
# Einfuhren aus der EU" (tax-free imports from the EU)) so that this
# mapping can be assigned to the foreign partner (customer/supplier).
# Posting a purchase invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the input tax base amount (e.g. input tax base
# amount, full rate 19%).
# The tax amount appears under the category "input taxes" (e.g. input
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated and then output in the form of a report.
#
# Posting a sales invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the sales tax base amount
# (e.g. sales tax base amount, full rate 19%).
# The tax amount appears under the category "sales tax"
# (e.g. sales tax 19%). Multidimensional hierarchies allow
# different positions to be aggregated.
# The assigned tax codes can be reviewed at the level of each individual
# invoice (incoming and outgoing) and adjusted there if necessary.
# Invoice credit notes lead to a correction (counter-entry) of the tax
# posting, in the form of a mirror-image posting.
|
"""
===============
Array Internals
===============
Internal organization of numpy arrays
=====================================
Understanding a bit about how numpy arrays are handled under the covers helps in understanding numpy better. This section will not go into great detail; those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy".
Numpy arrays consist of two major components, the raw array data (from now on,
referred to as the data buffer), and the information about the raw array data.
The data buffer is typically what people think of as arrays in C or Fortran,
a contiguous (and fixed) block of memory containing fixed sized data items.
Numpy also contains a significant set of data that describes how to interpret
the data in the data buffer. This extra information contains (among other things):
1) The basic data element's size in bytes
2) The start of the data within the data buffer (an offset relative to the
beginning of the data buffer).
3) The number of dimensions and the size of each dimension
4) The separation between elements for each dimension (the 'stride'). This
does not have to be a multiple of the element size
5) The byte order of the data (which may not be the native byte order)
6) Whether the buffer is read-only
7) Information (via the dtype object) about the interpretation of the basic
data element. The basic data element may be as simple as an int or a float,
or it may be a compound object (e.g., struct-like), a fixed character field,
or Python object pointers.
8) Whether the array is to be interpreted as C-order or Fortran-order.
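A quick, illustrative way to see some of this metadata on a live array::

    >>> import numpy as np
    >>> a = np.arange(12, dtype=np.int32).reshape(3, 4)
    >>> a.itemsize              # 1) element size in bytes
    4
    >>> a.shape                 # 3) number and size of dimensions
    (3, 4)
    >>> a.strides               # 4) byte separation per dimension
    (16, 4)
    >>> a.dtype.byteorder       # 5) '=' means native byte order
    '='
    >>> a.flags.writeable       # 6) read-only or not
    True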
This arrangement allows for very flexible use of arrays. One thing that it
allows is simple changes of the metadata to change the interpretation of the
array buffer. Changing the byteorder of the array is a simple change
involving no rearrangement of the data. The shape of the array can be
changed very easily without changing anything in the data buffer or any data
copying at all.
Among other things made possible by this is that one can create a new array
metadata object that uses the same data buffer, giving a new view of that
data buffer with a different interpretation of the buffer (e.g., different
shape, offset, byte order, strides, etc.) but sharing the same data bytes.
Many operations in numpy do just this, such as slicing. Other operations,
such as transpose, don't move data elements around in the array, but rather
change the information about the shape and strides so that the indexing of
the array changes, but the data in the buffer doesn't move.
Typically these new arrangements of the array metadata, with the same data
buffer, are new 'views' into the data buffer. There is a different ndarray
object, but it uses the same data buffer. This is why it is necessary to
force copies through use of the .copy() method if one really wants to make
a new and independent copy of the data buffer.
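For example, a view shares the buffer while a copy does not::

    >>> import numpy as np
    >>> a = np.arange(6)
    >>> v = a[2:5]          # a view: different ndarray, same buffer
    >>> v[0] = 99
    >>> a
    array([ 0,  1, 99,  3,  4,  5])
    >>> c = a.copy()        # an independent buffer
    >>> c[0] = -1
    >>> print(a[0])         # the original is untouched
    0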
New views into arrays mean that the object reference counts for the data buffer
increase. Simply doing away with the original array object will not remove the
data buffer if other views of it still exist.
Multidimensional Array Indexing Order Issues
============================================
What is the right way to index
multi-dimensional arrays? Before you jump to conclusions about the one and
true way to index multi-dimensional arrays, it pays to understand why this is
a confusing issue. This section will try to explain in detail how numpy
indexing works and why we adopt the convention we do for images, and when it
may be appropriate to adopt other conventions.
The first thing to understand is
that there are two conflicting conventions for indexing 2-dimensional arrays.
Matrix notation uses the first index to indicate which row is being selected and
the second index to indicate which column is selected. This is opposite the
geometrically oriented convention for images where people generally think the
first index represents x position (i.e., column) and the second represents y
position (i.e., row). This alone is the source of much confusion;
matrix-oriented users and image-oriented users expect two different things with
regard to indexing.
The second issue to understand is how indices correspond
to the order the array is stored in memory. In Fortran the first index is the
most rapidly varying index when moving through the elements of a two
dimensional array as it is stored in memory. If you adopt the matrix
convention for indexing, then this means the matrix is stored one column at a
time (since the first index moves to the next row as it changes). Thus Fortran
is considered a Column-major language. C has just the opposite convention. In
C, the last index changes most rapidly as one moves through the array as
stored in memory. Thus C is a Row-major language. The matrix is stored by
rows. Note that in both cases it presumes that the matrix convention for
indexing is being used, i.e., for both Fortran and C, the first index is the
row. Note this convention implies that the indexing convention is invariant
and that the data order changes to keep that so.
But that's not the only way
to look at it. Suppose one has large two-dimensional arrays (images or
matrices) stored in data files. Suppose the data are stored by rows rather than
by columns. If we are to preserve our index convention (whether matrix or
image) that means that depending on the language we use, we may be forced to
reorder the data if it is read into memory to preserve our indexing
convention. For example if we read row-ordered data into memory without
reordering, it will match the matrix indexing convention for C, but not for
Fortran. Conversely, it will match the image indexing convention for Fortran,
but not for C. For C, if one is using data stored in row order, and one wants
to preserve the image index convention, the data must be reordered when
reading into memory.
In the end, which you do for Fortran or C depends on
which is more important, not reordering data or preserving the indexing
convention. For large images, reordering data is potentially expensive, and
often the indexing convention is inverted to avoid that.
The situation with
numpy makes this issue yet more complicated. The internal machinery of numpy
arrays is flexible enough to accept any ordering of indices. One can simply
reorder indices by manipulating the internal stride information for arrays
without reordering the data at all. Numpy will know how to map the new index
order to the data without moving the data.
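For example, a transpose swaps only the stride information; the buffer is
untouched::

    >>> import numpy as np
    >>> a = np.zeros((3, 4))    # C order: rows are contiguous
    >>> a.strides
    (32, 8)
    >>> a.T.strides             # same buffer, remapped indices
    (8, 32)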
So if this is true, why not choose
the index order that matches what you most expect? In particular, why not define
row-ordered images to use the image convention? (This is sometimes referred
to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
order options for array ordering in numpy.) The drawback of doing this is
potential performance penalties. It's common to access the data sequentially,
either implicitly in array operations or explicitly by looping over rows of an
image. When that is done, then the data will be accessed in non-optimal order.
As the first index is incremented, what is actually happening is that elements
spaced far apart in memory are being sequentially accessed, with usually poor
memory access speeds. For example, consider a two dimensional image 'im'
defined so that im[0, 10] represents the value at x=0, y=10. To be
consistent with usual Python behavior, im[0] would then represent a column
at x=0. Yet that data would be spread over the whole array since the data
are stored in row order.
Despite the flexibility of numpy's indexing, it can't really paper over the
fact that basic operations are rendered inefficient because of data order,
or that getting contiguous subarrays is still awkward (e.g., im[:,0] for the
first row, vs im[0]). Thus one can't use an idiom such as 'for row in im';
'for col in im' does work, but doesn't yield contiguous column data.
As it turns out, numpy is
smart enough when dealing with ufuncs to determine which index is the most
rapidly varying one in memory and uses that for the innermost loop. Thus for
ufuncs there is no large intrinsic advantage to either approach in most cases.
On the other hand, use of .flat with a FORTRAN-ordered array will lead to
non-optimal memory access as adjacent elements in the flattened array (iterator,
actually) are not contiguous in memory.
Indeed, the fact is that Python
indexing on lists and other sequences naturally leads to an outside-to-inside
ordering (the first index gets the largest grouping, the next the next largest,
and the last gets the smallest element). Since image data are normally stored
by rows, this corresponds to position within rows being the last item indexed.
If you do want to use Fortran ordering, realize that
there are two approaches to consider: 1) accept that the first index is just not
the most rapidly changing in memory and have all your I/O routines reorder
your data when going from memory to disk or vice versa, or 2) use numpy's
mechanism for mapping the first index to the most rapidly varying data. We
recommend the former if possible. The disadvantage of the latter is that many
of numpy's functions will yield arrays without Fortran ordering unless you are
careful to use the 'order' keyword. Doing this would be highly inconvenient.
Otherwise we recommend simply learning to reverse the usual order of indices
when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
""" |
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D Chebyshev series.
- `chebvander3d` -- Vandermonde-like matrix for 3D Chebyshev series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] NAME et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
""" |
#
# tested on | Windows native | Linux cross-compilation
# ------------------------+-------------------+---------------------------
# MSVS C++ 2010 Express | WORKS | n/a
# Mingw-w64 | WORKS | WORKS
# Mingw-w32 | WORKS | WORKS
# MinGW | WORKS | untested
#
#####
# Notes about MSVS C++ :
#
# - MSVC2010-Express compiles to 32bits only.
#
#####
# Notes about Mingw-w64 and Mingw-w32 under Windows :
#
# - both can be installed using the official installer :
# http://mingw-w64.sourceforge.net/download.php#mingw-builds
#
# - if you want to compile both 32bits and 64bits, don't forget to
# run the installer twice to install them both.
#
# - install them into a path that does not contain spaces
# ( example : "C:/Mingw-w32", "C:/Mingw-w64" )
#
# - if you want to compile faster using the "-j" option, don't forget
# to install the appropriate version of the Pywin32 python extension
# available from : http://sourceforge.net/projects/pywin32/files/
#
# - before running scons, you must add into the environment path
# the path to the "/bin" directory of the Mingw version you want
# to use :
#
# set PATH=C:/Mingw-w32/bin;%PATH%
#
# - then, scons should be able to detect gcc.
# - Mingw-w32 only compiles 32bits.
# - Mingw-w64 only compiles 64bits.
#
# - it is possible to add them both at the same time into the PATH env,
# if you also define the MINGW32_PREFIX and MINGW64_PREFIX environment
# variables.
# For instance, you could store that set of commands into a .bat script
# that you would run just before scons :
#
# set PATH=C:\mingw-w32\bin;%PATH%
# set PATH=C:\mingw-w64\bin;%PATH%
# set MINGW32_PREFIX=C:\mingw-w32\bin\
# set MINGW64_PREFIX=C:\mingw-w64\bin\
#
#####
# Notes about Mingw, Mingw-w64 and Mingw-w32 under Linux :
#
# - default toolchain prefixes are :
# "i586-mingw32msvc-" for MinGW
# "i686-w64-mingw32-" for Mingw-w32
# "x86_64-w64-mingw32-" for Mingw-w64
#
# - if both MinGW and Mingw-w32 are installed on your system
# Mingw-w32 should take the priority over MinGW.
#
# - it is possible to manually override prefixes by defining
# the MINGW32_PREFIX and MINGW64_PREFIX environment variables.
#
#####
# Notes about Mingw under Windows :
#
# - this is the MinGW version from http://mingw.org/
# - install it into a path that does not contain spaces
# ( example : "C:/MinGW" )
# - several DirectX headers might be missing. You can copy them into
# the C:/MinGW/include" directory from this page :
# https://code.google.com/p/mingw-lib/source/browse/trunk/working/avcodec_to_widget_5/directx_include/
# - before running scons, add the path to the "/bin" directory :
# set PATH=C:/MinGW/bin;%PATH%
# - scons should be able to detect gcc.
#
#####
# TODO :
#
# - finish cleaning up this script to remove all the remains of previous hacks and workarounds
# - make it work with the Windows7 SDK that is supposed to enable 64bits compilation for MSVC2010-Express
# - confirm it works well with other Visual Studio versions.
# - update the wiki about the pywin32 extension required for the "-j" option under Windows.
# - update the wiki to document MINGW32_PREFIX and MINGW64_PREFIX
#
|
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
""" |
"""
Barter system
Evennia contribution - NAME 2012
This implements a full barter system - a way for players to safely
trade items between each other using code rather than simple free-form
talking. The advantage of this is increased buy/sell safety but it
also streamlines the process and makes it faster when doing many
transactions (since goods are automatically exchanged once both
agree).
This system is primarily intended for a barter economy, but can easily
be used in a monetary economy as well -- just let the "goods" on one
side be coin objects (this is more flexible than a simple "buy"
command since you can mix coins and goods in your trade).
In this module, a "barter" is generally referred to as a "trade".
- Trade example
A trade (barter) action works like this: A and B are the parties.
1) opening a trade
A: trade B: Hi, I have a nice extra sword. You wanna trade?
B sees: A says: "Hi, I have a nice extra sword. You wanna trade?"
A wants to trade with you. Enter 'trade A <emote>' to accept.
B: trade A: Hm, I could use a good sword ...
A sees: B says: "Hm, I could use a good sword ..."
B accepts the trade. Use 'trade help' for aid.
B sees: You are now trading with A. Use 'trade help' for aid.
2) negotiating
A: offer sword: This is a nice sword. I would need some rations in trade.
B sees: A says: "This is a nice sword. I would need some rations in trade."
[A offers Sword of might.]
B evaluate sword
B sees: <Sword's description and possibly stats>
B: offer ration: This is a prime ration.
A sees: B says: "This is a prime ration."
[B offers iron ration]
A: say Hey, this is a nice sword, I need something more for it.
B sees: A says: "Hey this is a nice sword, I need something more for it."
B: offer sword,apple: Alright. I will also include a magic apple. That's my last offer.
A sees: B says: "Alright, I will also include a magic apple. That's my last offer."
[B offers iron ration and magic apple]
A accept: You are killing me here, but alright.
B sees: A says: "You are killing me here, but alright."
[A accepts your offer. You must now also accept.]
B accept: Good, nice making business with you.
You accept the deal. Deal is made and goods changed hands.
A sees: B says: "Good, nice making business with you."
B accepts the deal. Deal is made and goods changed hands.
At this point the trading system is exited and the negotiated items
are automatically exchanged between the parties. In this example B was
the only one changing their offer, but also A could have changed their
offer until the two parties found something they could agree on. The
emotes are optional but useful for RP-heavy worlds.
- Technical info
The trade is implemented by use of a TradeHandler. This object is a
common place for storing the current status of negotiations. It is
created on the object initiating the trade, and also stored on the
other party once that party agrees to trade. The trade request times
out after a certain time - this is handled by a Script. Once trade
starts, the CmdsetTrade cmdset is initiated on both parties along with
the commands relevant for the trading.
- Ideas for NPC bartering:
This module is primarily intended for trade between two players. But
it can also in principle be used for a player negotiating with an
AI-controlled NPC. If the NPC uses normal commands they can use it
directly -- but more efficient is to have the NPC object send its
replies directly through the tradehandler to the player. One may want
to add some functionality to the decline command, so players can
decline specific objects in the NPC offer (decline <object>) and allow
the AI to maybe offer something else and make it into a proper
barter. Along with an AI that "needs" things or has some sort of
personality in the trading, this can make bartering with NPCs at least
moderately more interesting than just plain 'buy'.
- Installation:
Just import the CmdTrade command into (for example) the default
cmdset. This will make the trade (or barter) command available
in-game.
""" |
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
# 2012-06-29 EMAIL Made all classes new-style
# 2012-07-02 EMAIL Include dist. ElementPath
# 2013-02-27 EMAIL renamed module files, kept namespace.
#
# Copyright (c) 1999-2005 by NAME All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
|
"""
Computing Riemann Theta Functions
This module implements the algorithms for computing Riemann theta
functions and their derivatives featured in the paper *"Computing
Riemann Theta Functions"* by NAME NAME Bobenko, NAME and
Schmies [CRTF].
**DEFINITION OF THE RIEMANN THETA FUNCTION:**
Let `g` be a positive integer, the *genus* of the Riemann theta
function. Let `H_g` denote the Siegel upper half space of dimension
`g(g+1)/2` over `\CC` , that is the space of symmetric complex
matrices whose imaginary parts are positive definite. When `g = 1`,
this is just the complex upper half plane.
The Riemann theta function `\theta : \CC^g \times H_g \to \CC` is
defined by the infinite series
.. math::
\theta( z | \Omega ) = \sum_{ n \in \ZZ^g } e^{ 2 \pi i \left( \tfrac{1}{2} n \cdot \Omega n + n \cdot z \right) }
It is holomorphic in both `z` and `\Omega`. It is quasiperiodic in `z`
with respect to the lattice `\{ M + \Omega N | M,N \in \ZZ^g \}`,
meaning that `\theta(z|\Omega)` is periodic upon translation of `z` by
vectors in `\ZZ^g` and periodic up to a multiplicative exponential
factor upon translation of `z` by vectors in `\Omega \ZZ^g`. As a
consequence, `\theta(z | \Omega)` has exponential growth in the
imaginary parts of `z`.
When `g=1`, the Riemann theta function is the third Jacobi theta
function.
.. math::
\theta( z | \Omega) = \theta_3(\pi z | \Omega) = 1 + 2 \sum_{n=1}^\infty e^{i \pi \Omega n^2} \cos(2 \pi n z)
Riemann theta functions are the fundamental building blocks for
Abelian functions, which generalize the classical elliptic functions
to multiple variables. Like elliptic functions, Abelian functions and
consequently Riemann theta functions arise in many applications such
as integrable partial differential equations, algebraic geometry, and
optimization.
For more information about the basic facts of and definitions
associated with Riemann theta functions, see the Digital Library of
Mathematics Functions ``http://dlmf.nist.gov/21``.
**ALGORITHM:**
The algorithm in [CRTF] is based on the observation that the
exponential growth of `\theta` can be factored out of the sum. Thus,
we only need to find an approximation for the oscillatory part. The
derivation is omitted here but the key observation is to write `z = x
+ i y` and `\Omega = X + i Y` where `x`, `y`, `X`, and `Y` are real
vectors and matrices. With the exponential growth part factored out
of the sum, the goal is to find the integral points `n \in \ZZ^g` such
that the sum over these points is within `O(\epsilon)` accuracy of the
infinite sum, for a given `z \in \CC^g` and numerical accuracy
`\epsilon`.
By default we use the uniform approximation formulas which use the
same integral points for all `z` for a fixed `\Omega`. This can be
changed by setting ``uniform=False``. This is ill-advised if you need
to compute the Riemann theta function for a fixed `\Omega` for many
different `z`.
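For orientation, a naive sketch for `g = 1` that simply truncates the
defining series (this is *not* the [CRTF] algorithm, which bounds the error
and factors out the exponential growth)::

    import numpy as np

    def theta_g1(z, omega, N=50):
        # direct truncation of the defining sum over n in [-N, N];
        # sensible only when Im(omega) > 0 so the terms decay
        n = np.arange(-N, N + 1)
        return np.exp(2j * np.pi * (0.5 * n**2 * omega + n * z)).sum()

    theta_g1(0.0, 1j)   # ~ 1.0864, the theta constant theta_3(0, e**-pi)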
**REFERENCES:**
- [CRTF] Computing Riemann Theta Functions. Bernard NAME NAME
NAME NAME NAME and NAME Mathematics
of Computation 73 (2004) 1417-1442. The paper is available at
http://www.amath.washington.edu/~bernard/papers/pdfs/computingtheta.pdf.
Accompanying Maple code is available at
http://www.math.fsu.edu/~hoeij/RiemannTheta/
- Digital Library of Mathematics Functions - Riemann Theta Functions ( http://dlmf.nist.gov/21 ).
**AUTHORS:**
- NAME (2011-11): major overhaul to match notation of
[CRTF], numerous bug fixes, documentation, doctests, symbolic
evaluation
- NAME (2012-2013)
""" |
""" interpolate data given on an Nd rectangular grid, uniform or non-uniform.
Purpose: extend the fast N-dimensional interpolator
`scipy.ndimage.map_coordinates` to non-uniform grids, using `np.interp`.
Background: please look at
http://en.wikipedia.org/wiki/Bilinear_interpolation
http://stackoverflow.com/questions/6238250/multivariate-spline-interpolation-in-python-scipy
http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.ndimage.interpolation.map_coordinates.html
Example
-------
Say we have rainfall on a 4 x 5 grid of rectangles, lat 52 .. 55 x lon -10 .. -6,
and want to interpolate (estimate) rainfall at 1000 query points
in between the grid points.
# define the grid --
griddata = np.loadtxt(...) # griddata.shape == (4, 5)
lo = np.array([ 52, -10 ]) # lowest lat, lowest lon
hi = np.array([ 55, -6 ]) # highest lat, highest lon
# set up an interpolator function "interfunc()" with class Intergrid --
interfunc = Intergrid( griddata, lo=lo, hi=hi )
# generate 1000 random query points, lo <= [lat, lon] <= hi --
query_points = lo + np.random.uniform( size=(1000, 2) ) * (hi - lo)
# get rainfall at the 1000 query points --
query_values = interfunc( query_points ) # -> 1000 values
What this does:
for each [lat, lon] in query_points:
1) find the square of griddata it's in,
e.g. [52.5, -8.1] -> [0, 3] [0, 4] [1, 4] [1, 3]
2) do bilinear (multilinear) interpolation in that square,
using `scipy.ndimage.map_coordinates` .
Check:
interfunc( lo ) -> griddata[0, 0],
interfunc( hi ) -> griddata[-1, -1] i.e. griddata[3, 4]
Parameters
----------
griddata: numpy array_like, 2d 3d 4d ...
lo, hi: user coordinates of the corners of griddata, 1d array-like, lo < hi
maps: a list of `dim` descriptors of piecewise-linear or nonlinear maps,
e.g. [[50, 52, 62, 63], None] # uniformize lat, linear lon
copy: make a copy of query_points, default True;
copy=False overwrites query_points, runs in less memory
verbose: default 1: print a 1-line summary for each call, with run time
order=1: see `map_coordinates`
prefilter: 0 or False, the default: smoothing B-spline
1 or True: exact-fit interpolating spline (IIR, not C-R)
1/3: Mitchell-Netravali spline, 1/3 B + 2/3 fit
(prefilter is only for order > 1, since order = 1 interpolates)
Non-uniform rectangular grids
-----------------------------
What if our griddata above is at non-uniformly-spaced latitudes,
say [50, 52, 62, 63] ? `Intergrid` can "uniformize" these
before interpolation, like this:
lo = np.array([ 50, -10 ])
hi = np.array([ 63, -6 ])
maps = [[50, 52, 62, 63], None] # uniformize lat, linear lon
interfunc = Intergrid( griddata, lo=lo, hi=hi, maps=maps )
This will map (transform, stretch, warp) the lats in query_points column 0
to array coordinates in the range 0 .. 3, using `np.interp` to do
piecewise-linear (PWL) mapping:
50 51 52 53 54 55 56 57 58 59 60 61 62 63 # lo[0] .. hi[0]
0 .5 1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2 3
`maps[1] None` says to map the lons in query_points column 1 linearly:
-10 -9 -8 -7 -6 # lo[1] .. hi[1]
0 1 2 3 4
More doc: https://denis-bz.github.com/docs/intergrid.html
""" |
"""
Implementation of the Colella 2nd order unsplit Godunov scheme. This
is a 2-dimensional implementation only. We assume that the grid is
uniform, but it is relatively straightforward to relax this
assumption.
There are several different options for this solver (they are all
discussed in the Colella paper).
limiter = 0 to use no limiting
= 1 to use the 2nd order MC limiter
= 2 to use the 4th order MC limiter
riemann = HLLC to use the HLLC solver
= CGF to use the Colella, Glaz, and Ferguson solver
use_flattening = 1 to use the multidimensional flattening
algorithm at shocks
delta, z0, z1 these are the flattening parameters. The defaults
are the values listed in Colella 1990.
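For reference, a minimal sketch of the 2nd order MC limited slope on a 1-d
array of zone averages (interior zones only; the solver's actual
implementation and array layout may differ)::

    import numpy as np

    def mc_slopes(a):
        # monotonized central (MC) limiter: centered slope, clipped to
        # twice the one-sided slopes, zeroed at extrema
        dl = a[1:-1] - a[:-2]           # left difference
        dr = a[2:] - a[1:-1]            # right difference
        dc = 0.5 * (a[2:] - a[:-2])     # centered difference
        lim = np.minimum(np.abs(dc),
                         np.minimum(2.0 * np.abs(dl), 2.0 * np.abs(dr)))
        return np.where(dl * dr > 0.0, np.sign(dc) * lim, 0.0)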
j+3/2--+---------+---------+---------+
| | | |
j+1 _| | | |
| | | |
| | | |
j+1/2--+---------XXXXXXXXXXX---------+
| X X |
j _| X X |
| X X |
| X X |
j-1/2--+---------XXXXXXXXXXX---------+
| | | |
j-1 _| | | |
| | | |
| | | |
j-3/2--+---------+---------+---------+
| | | | | | |
i-1 i i+1
i-3/2 i-1/2 i+1/2 i+3/2
We wish to solve
U_t + F^x_x + F^y_y = H
we want U_{i+1/2}^{n+1/2} -- the interface values that are input to
the Riemann problem through the faces for each zone.
Taylor expanding yields
n+1/2 dU dU
U = U + 0.5 dx -- + 0.5 dt --
i+1/2,j,L i,j dx dt
dU dF^x dF^y
= U + 0.5 dx -- - 0.5 dt ( ---- + ---- - H )
i,j dx dx dy
dU dF^x dF^y
= U + 0.5 ( dx -- - dt ---- ) - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt dU dF^y
= U + 0.5 dx ( 1 - -- A^x ) -- - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt _ dF^y
= U + 0.5 ( 1 - -- A^x ) DU - 0.5 dt ---- + 0.5 dt H
i,j dx dy
+----------+-----------+ +----+----+ +---+---+
| | |
this is the monotonized this is the source term
central difference term transverse
flux term
There are two components, the central difference in the normal to the
interface, and the transverse flux difference. This is done for the
left and right sides of all 4 interfaces in a zone, which are then
used as input to the Riemann problem, yielding the 1/2 time interface
values,
n+1/2
U
i+1/2,j
Then, the zone average values are updated in the usual finite-volume
way:
n+1 n dt x n+1/2 x n+1/2
U = U + -- { F (U ) - F (U ) }
i,j i,j dx i-1/2,j i+1/2,j
dt y n+1/2 y n+1/2
+ -- { F (U ) - F (U ) }
dy i,j-1/2 i,j+1/2
Updating U_{i,j}:
-- We want to find the state to the left and right (or top and
bottom) of each interface, ex. U_{i+1/2,j,[lr]}^{n+1/2}, and use
them to solve a Riemann problem across each of the four
interfaces.
-- U_{i+1/2,j,[lr]}^{n+1/2} is comprised of two parts, the
computation of the monotonized central differences in the normal
direction (eqs. 2.8, 2.10) and the computation of the transverse
derivatives, which requires the solution of a Riemann problem in
the transverse direction (eqs. 2.9, 2.14).
-- the monotonized central difference part is computed using
the primitive variables.
-- We compute the central difference part in both directions
before doing the transverse flux differencing, since for the
high-order transverse flux implementation, we use these as
the input to the transverse Riemann problem.
""" |
"""
.. versionadded:: 0.93
Geopy can calculate geodesic distance between two points using the
[Vincenty distance](https://en.wikipedia.org/wiki/Vincenty's_formulae) or
[great-circle distance](https://en.wikipedia.org/wiki/Great-circle_distance)
formulas, with a default of Vincenty available as the function
`geopy.distance.distance`.
Great-circle distance (:class:`.great_circle`) uses a spherical model of
the earth, using the average great-circle radius of 6372.795 kilometers,
resulting in an error of up to about 0.5%. The radius value is stored in
:const:`distance.EARTH_RADIUS`, so it can be customized
(it should always be in kilometers, however).
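For example, to inspect or override the radius (the 6371.0 below is just
the common mean-radius alternative)::
    >>> from geopy import distance
    >>> distance.EARTH_RADIUS
    6372.795
    >>> distance.EARTH_RADIUS = 6371.0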
Vincenty distance (:class:`.vincenty`) uses a more accurate ellipsoidal model
of the earth. This is the default distance formula, and is thus aliased as
``distance.distance``. There are multiple popular ellipsoidal models, and
which one will be the most accurate depends on where your points are located
on the earth. The default is the WGS-84 ellipsoid, which is the most globally
accurate. geopy includes a few other
models in the distance.ELLIPSOIDS dictionary::
model major (km) minor (km) flattening
ELLIPSOIDS = {'WGS-84': (6378.137, 6356.7523142, 1 / \
298.257223563),
'GRS-80': (6378.137, 6356.7523141, 1 / \
298.257222101),
'Airy (1830)': (6377.563396, 6356.256909, 1 / \
299.3249646),
'Intl 1924': (6378.388, 6356.911946, 1 / 297.0),
'Clarke (1880)': (6378.249145, 6356.51486955, 1 / 293.465),
'GRS-67': (6378.1600, 6356.774719, 1 / 298.25),
}
Here's an example usage of distance.vincenty::
>>> from geopy.distance import vincenty
>>> newport_ri = (41.49008, -71.312796)
>>> cleveland_oh = (41.499498, -81.695391)
>>> print(vincenty(newport_ri, cleveland_oh).miles)
538.3904451566326
Using great-circle distance::
>>> from geopy.distance import great_circle
>>> newport_ri = (41.49008, -71.312796)
>>> cleveland_oh = (41.499498, -81.695391)
>>> print(great_circle(newport_ri, cleveland_oh).miles)
537.1485284062816
You can change the ellipsoid model used by the Vincenty formula like so::
>>> distance.vincenty(ne, cl, ellipsoid='GRS-80').miles
The above model name will automatically be retrieved from the
ELLIPSOIDS dictionary. Alternatively, you can specify the model values
directly::
>>> distance.vincenty(ne, cl, ellipsoid=(6377., 6356., 1 / 297.)).miles
Distances support simple arithmetic, making it easy to do things like
calculate the length of a path::
>>> d = distance.distance
>>> _, wa = g.geocode('Washington, DC')
>>> _, pa = g.geocode('Palo Alto, CA')
>>> print((d(ne, cl) + d(cl, wa) + d(wa, pa)).miles)
3276.157156868931
""" |
"""
Wrappers to LAPACK library
==========================
flapack -- wrappers for Fortran [*] LAPACK routines
clapack -- wrappers for ATLAS LAPACK routines
calc_lwork -- calculate optimal lwork parameters
get_lapack_funcs -- query for wrapper functions.
[*] If ATLAS libraries are available, then the Fortran routines
actually use ATLAS routines and should perform as well as
the ATLAS routines.
Module flapack
++++++++++++++
In the following all function names are shown without
type prefix (s,d,c,z). Optimal values for lwork can
be computed using calc_lwork module.
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0)
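For example, gesv can be fetched and called via get_lapack_funcs
(a small sketch; the 2x2 system is made up)::
  import numpy as np
  from scipy.linalg import get_lapack_funcs
  a = np.array([[3., 1.], [1., 2.]])
  b = np.array([9., 8.])
  gesv, = get_lapack_funcs(('gesv',), (a, b))  # picks the d-prefixed routine
  lu, piv, x, info = gesv(a, b)                # solves a x = b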
Computational routines::
lu,piv,info = getrf(a,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,overwrite_b=0)
inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,overwrite_b=0)
inv_a,info = potri(c,lower=0,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0)
Linear Least Squares (LLS) Problems
-----------------------------------
Drivers::
v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Computational routines::
qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0)
q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1)
Generalized Linear Least Squares (LSE and GLM) Problems
-------------------------------------------------------
Standard Eigenvalue and Singular Value Problems
-----------------------------------------------
Drivers::
w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0)
t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0)
wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0)
u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0)
Computational routines::
ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0)
ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0)
Generalized Eigenvalue and Singular Value Problems
--------------------------------------------------
Drivers::
w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
(alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,overwrite_c=0)
a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0)
Module clapack
++++++++++++++
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0)
inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0)
inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0)
Module calc_lwork
+++++++++++++++++
Optimal lwork is maxwrk. Default is minwrk.
minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1)
minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1)
minwrk,maxwrk = gelss(prefix,m,n,nrhs)
minwrk,maxwrk = getri(prefix,n)
minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1)
minwrk,maxwrk = heev(prefix,n,lower=0)
minwrk,maxwrk = syev(prefix,n,lower=0)
minwrk,maxwrk = gees(prefix,n,compute_v=1)
minwrk,maxwrk = geqrf(prefix,m,n)
minwrk,maxwrk = gqr(prefix,m,n)
""" |
"""
This page is in the table of contents.
Stretch is a very important Skeinforge plugin that allows you to partially compensate for the fact that extruded holes are smaller than they should be. It stretches the threads to partially compensate for filament shrinkage when extruded.
The stretch manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Stretch
Extruded holes are smaller than the model because while printing an arc the head is depositing filament on both sides of the arc, but on the inside of the arc you actually need less material than on the outside of the arc. You can read more about this on the RepRap ArcCompensation page:
http://reprap.org/bin/view/Main/ArcCompensation
In general, stretch will widen holes and push corners out. In practice the filament contraction will not be identical to the algorithm, so even once the optimal parameters are determined, the stretch script will not be able to eliminate the inaccuracies caused by contraction, but it should reduce them.
All the defaults assume that the thread sequence choice setting in fill is the edge being extruded first, then the loops, then the infill. If the thread sequence choice is different, the optimal thread parameters will also be different. In general, if the infill is extruded first, the infill would have to be stretched more so that even after the filament shrinkage, it would still be long enough to connect to the loop or edge.
Holes should be made with the correct area for their radius. In other words, for example if your modeling program approximates a hole of radius one (area = pi) by making a square with the points at [(1,0), (0,1), (-1,0), (0,-1)] (area = 2), the radius should be increased by sqrt(pi/2). This can be done in fabmetheus xml by writing:
radiusAreal='True'
in the attributes of the object or any parent of that object. In other modeling programs, you'll have to do this manually or make a script. If area compensation is not done, then changing the stretch parameters to overcompensate for too-small hole areas will lead to incorrect compensation in other shapes.
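A two-line Python sketch of that sqrt(pi/2) correction for a model hole of radius one:
    import math
    correctedRadius = 1.0 * math.sqrt(math.pi / 2.0)  # ~1.2533, restores area pi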
==Operation==
The default 'Activate Stretch' checkbox is off. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Loop Stretch Over Perimeter Width===
Default is 0.1.
Defines the ratio of the maximum amount the loop aka inner shell threads will be stretched compared to the edge width, in general this value should be the same as the 'Perimeter Outside Stretch Over Perimeter Width' setting.
===Path Stretch Over Perimeter Width===
Default is zero.
Defines the ratio of the maximum amount the threads which are not loops, like the infill threads, will be stretched compared to the edge width.
===Perimeter===
====Perimeter Inside Stretch Over Perimeter Width====
Default is 0.32.
Defines the ratio of the maximum amount the inside edge thread will be stretched compared to the edge width; this is the most important setting in stretch. The higher the value, the more it will stretch the edge and the wider the holes will be. If the value is too small, the holes could be drilled out after fabrication; if the value is too high, the holes would be too wide and the part would have to be junked.
====Perimeter Outside Stretch Over Perimeter Width====
Default is 0.1.
Defines the ratio of the maximum amount the outside edge thread will be stretched compared to the edge width, in general this value should be around a third of the 'Perimeter Inside Stretch Over Perimeter Width' setting.
===Stretch from Distance over Perimeter Width===
Default is two.
The stretch algorithm works by checking, at each turning point on the extrusion path, what the direction of the thread is at a distance of 'Stretch from Distance over Perimeter Width' times the edge width on both sides, and moves the thread in the opposite direction. So it takes the current turning point, goes "Stretch from Distance over Perimeter Width" * "Perimeter Width" ahead, and reads the direction at that point. Then it goes the same distance backwards and reads the direction at that other point. It then moves the thread in the opposite direction, away from the center of the arc formed by these two points and directions.
The magnitude of the stretch increases with:
the degree to which the directions of the two sampled threads are similar, and
the '..Stretch Over Perimeter Width' ratio.
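A loose sketch of that displacement rule (not skeinforge's actual code; points are complex numbers, and locationAlongPath is a hypothetical helper that walks the given distance along the path from point index i):
    def stretchPoint( path, i, stretchDistance, stretchRatio ):
        'Push turning point path[i] away from the local arc center.'
        ahead = locationAlongPath( path, i, stretchDistance )
        behind = locationAlongPath( path, i, -stretchDistance )
        midpoint = 0.5 * ( ahead + behind )
        return path[i] + stretchRatio * ( path[i] - midpoint )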
==Examples==
The following examples stretch the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and stretch.py.
> python stretch.py
This brings up the stretch dialog.
> python stretch.py Screw Holder Bottom.stl
The stretch tool is parsing the file:
Screw Holder Bottom.stl
..
The stretch tool has created the file:
.. Screw Holder Bottom_stretch.gcode
""" |
#
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from NAME)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from NAME)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from NAME)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from NAME)
# 2002-03-17 fl Avoid buffered read when possible (from NAME)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from NAME)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from NAME)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
# 2014-12-02 ch/doko Add workaround for gzip bomb vulnerability
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by NAME Lundh.
#
# EMAIL http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by NAME Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
#
# things to look into some day:
# TODO: sort out True/False/boolean issues for Python 2.3
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - NAME - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
#
# This module works in OpenERP 5.0.0 (and probably higher).
# This module does not work in OpenERP version 4 and lower.
#
# Status 1.0 - tested on OpenERP 5.0.3
#
# Version IP_ADDRESS
# account.account.type
# Laid the foundation for all account types
#
# account.account.template
# Laid the foundation with all required general ledger accounts, which are
# linked via a menu structure to sections 1 through 9.
# The general ledger accounts are linked to the account.account.type
# These links still need to be checked over carefully.
#
# account.chart.template
# Laid the foundation for linking accounts to debtors, creditors,
# bank, purchase and sales journals, and the VAT configuration.
#
# Version IP_ADDRESS
# account.tax.code.template
# Laid the foundation for the VAT configuration (structure)
# Used the VAT return form as the basis. Whether this works remains to be seen.
#
# account.tax.template
# Created the VAT accounts and linked them to the relevant
# general ledger accounts
#
# Version IP_ADDRESS
# Cleaned up the code and removed unused components.
# Version IP_ADDRESS
# Changed a_expense from 3000 -> 7000
# record id='btw_code_5b' set to a negative value
# Version IP_ADDRESS
# VAT accounts have been given a type indication for purchase or sale
# Version IP_ADDRESS
# Cleanup of the module.
# Version IP_ADDRESS
# Cleanup of the module.
# Version IP_ADDRESS
# Corrected a small error in l10n_nl_wizard.xml that prevented the module from installing completely.
# Version IP_ADDRESS
# Account Receivable and Payable properly defined.
# Version IP_ADDRESS
# All user_type_xxx fields properly defined.
# Removed the construction- and garage-specific ledgers in order to create a standard module.
# This module can then be used as a basis for creating modules for specific target groups.
# Version IP_ADDRESS
# Corrected account 7010 (it duplicated 7014, which caused the installation to go wrong)
# Version IP_ADDRESS
# Corrections to various account types from user_type_asset -> user_type_liability and user_type_equity
# Version IP_ADDRESS
# Small correction to high-rate VAT receivable ('BTW te vorderen hoog'): the id was the same for both,
# so 'high' was overwritten by 'other'. Clarified the descriptions in the tax codes for the return overview.
# Version IP_ADDRESS
# Adjusted the VAT descriptions so that reports look better. Removed 2a, 5b, etc. and added some descriptions.
# Version IP_ADDRESS - Switch to English
# Added properties_stock_xxx accounts for correct stock valuation, changed 7000-accounts from type cash to type expense
# Changed naming of 7020 and 7030 to Kostprijs omzet xxxx
|
"""
Test script for src=9 provisioning
Below are some odd examples and notes:
Adding a class
{
'src': '9',
'uln': 'Githens',
'ufn': 'Steven',
'aid': '56021',
'utp': '2',
'said': '56021',
'fid': '2',
'username': 'swgithen',
'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'diagnostic': '0',
'encrypt': '0',
'uem': 'EMAIL',
'cid': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'fcmd': '2'
}
{rmessage=Successful!, userid=17463901, classid=2836785, rcode=21}
Adding an assignment
{
'fid': '4',
'diagnostic': '0',
'ufn': 'Steven',
'uln': 'Githens',
'username': 'swgithen',
'assignid': 'AssignmentTitlec717957d-254f-4d6d-a64c-952e630db872',
'aid': '56021',
'src': '9',
'cid': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'said': '56021',
'dtstart': '20091225',
'encrypt': '0',
'assign': 'AssignmentTitlec717957d-254f-4d6d-a64c-952e630db872',
'uem': 'EMAIL',
'utp': '2',
'fcmd': '2',
'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'dtdue': '20100101'
}
{rmessage=Successful!, userid=17463901, classid=2836785, assignmentid=7902977, rcode=41}
Adding an assignment with another inst
{'fid': '4', 'diagnostic': '0', 'ufn': 'StevenIU', 'uln': 'GithensIU', 'username': 'sgithens', 'assignid': 'AssignmentTitle5ae51e10-fd60-4720-931b-ed4f58057d00', 'aid': '56021', 'src': '9', 'cid': '2836785', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitle5ae51e10-fd60-4720-931b-ed4f58057d00', 'uem': 'EMAIL', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463902, classid=2836786, assignmentid=7902978, rcode=41}
Adding a class
{'src': '9', 'uln': 'Githens', 'ufn': 'Steven', 'aid': '56021', 'utp': '2', 'said': '56021', 'fid': '2', 'username': 'swgithen', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'diagnostic': '0', 'encrypt': '0', 'uem': 'EMAIL', 'fcmd': '2'}
{rmessage=Successful!, userid=17259618, classid=2836733, rcode=21}
Adding an assignment
{'fid': '4', 'diagnostic': '0', 'ufn': 'Steven', 'uln': 'Githens', 'username': 'swgithen', 'assignid': 'AssignmentTitlec4f211c1-2c38-4daf-86dc-3c57c6ef5b7b', 'aid': '56021', 'src': '9', 'cid': '2836733', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitlec4f211c1-2c38-4daf-86dc-3c57c6ef5b7b', 'uem': 'EMAIL', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463581, classid=2836734, assignmentid=7902887, rcode=41}
Adding an assignment with another inst
{'fid': '4', 'diagnostic': '0', 'ufn': 'StevenIU', 'uln': 'GithensIU', 'username': 'sgithens', 'assignid': 'AssignmentTitle2650fcca-b96e-42bd-926e-63660076d2ad', 'aid': '56021', 'src': '9', 'cid': '2836733', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitle2650fcca-b96e-42bd-926e-63660076d2ad', 'uem': 'EMAIL', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463581, classid=2836734, assignmentid=7902888, rcode=41}
""" |
#########################################################################################################
# test_4.py
# Implements unit tests for the genericQSARpyUtils project (see below).
#
# ########################################
# #test_4.py: Key documentation :Contents#
# ########################################
# #1. Overview of this project.
# #2. IMPORTANT LEGAL ISSUES
# #<N.B.: Check this section ("IMPORTANT LEGAL ISSUES") to see whether - and how - you ARE ALLOWED TO use this code!>
# #<N.B.: Includes contact details.>
# ##############################
# #1. Overview of this project.#
# ##############################
# #Project name: genericQSARpyUtils
# #Purpose of this project: To provide a set of Python functions
# #(or classes with associated methods) that can be used to perform a variety of tasks
# #which are relevant to generating input files, from cheminformatics datasets, which can be used to build and
# #validate QSAR models (generated using Machine Learning methods implemented in other software packages)
# #on such datasets.
# #To this end, two Python modules are currently provided.
# #(1) ml_input_utils.py
# #Defines the following class:
# #descriptorsFilesProcessor: This contains methods which can be used to prepare datasets in either CSV or svmlight format, including converting between these formats, based upon previously calculated fingerprints (expressed as a set of tab separated text strings for each instance) or numeric descriptors.
# #(2) ml_functions.py
# #Defines a set of functions which can be used to carry out univariate feature selection,cross-validation etc. for Machine Learning model input files in svmlight format.
# ###########################
# #2. IMPORTANT LEGAL ISSUES#
# ###########################
# Copyright Syngenta Limited 2013
#Copyright (c) 2013-2016 Liverpool John Moores University
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
# THIS PROGRAM IS MADE AVAILABLE FOR DISTRIBUTION WITHOUT ANY FORM OF WARRANTY TO THE
# EXTENT PERMITTED BY APPLICABLE LAW. THE COPYRIGHT HOLDER PROVIDES THE PROGRAM \"AS IS\"
# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM LIES
# WITH THE USER. SHOULD THE PROGRAM PROVE DEFECTIVE IN ANY WAY, THE USER ASSUMES THE
# COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. THE COPYRIGHT HOLDER IS NOT
# RESPONSIBLE FOR ANY AMENDMENT, MODIFICATION OR OTHER ENHANCEMENT MADE TO THE PROGRAM
# BY ANY USER WHO REDISTRIBUTES THE PROGRAM SO AMENDED, MODIFIED OR ENHANCED.
# IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL THE
# COPYRIGHT HOLDER BE LIABLE TO ANY USER FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
# INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
# PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
# OR LOSSES SUSTAINED BY THE USER OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO
# OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGES.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ####################
# See also: http://www.gnu.org/licenses/ (last accessed 14/01/2013)
# Contact:
# 1. EMAIL or if this fails
# 2. EMAIL #####################
#########################################################################################################
#################################
#N.B. 02/06/13: new output files found to be inconsistent with old output files.
#Hence:
#(1) commented out the new vs. old, and clean up new, part of the code,
#(2) checked all new output files were consistent with expectations [DONE => <OK>; however, perhaps the following message from run_tests.log indicates a possible cause of inconsistency with the old results?: "C:\Python27\lib\site-packages\sklearn\feature_selection\univariate_selection.py:271: UserWarning: Duplicate p-values. Result may depend on feature ordering. There are probably duplicate features, or you used a classification score for a regression task. warn("Duplicate p-values. Result may depend on feature ordering.")"],
#(3) copied the new output files,
#(4) uncommented the new vs. old, and clean up new, part of the code,
#(5) re-ran tests.
##################################
|
"""
[2017-04-28] Challenge #312 [Hard] Text Summarizer
https://www.reddit.com/r/dailyprogrammer/comments/683w4s/20170428_challenge_312_hard_text_summarizer/
# Description
Automatic summarization is the process of reducing a text document with a computer program in order to create a summary
that retains the most important points of the original document. A number of algorithms have been developed, with the
simplest being one that parses the text, finds the most unique (or important) words, and then finds a sentence or two
that contains the most number of the most important words discovered. This is sometimes called "extraction-based
summarization" because you are extracting a sentence that conveys the summary of the text.
For your challenge, you should write an implementation of a text summarizer that can take a block of text (e.g. a
paragraph) and emit a one or two sentence summarization of it. You can use a stop word list (words that appear in
English that don't add any value) from [here](http://snowball.tartarus.org/algorithms/english/stop.txt).
You may want to review this brief overview of the algorithms and approaches in text summarization from [Fast Forward
labs](http://blog.fastforwardlabs.com/post/141666523533/hp-luhn-and-the-heuristic-value-of-simplicity).
This is essentially what [the autotldr bot does](https://www.reddit.com/r/autotldr/comments/31b9fm/faq_autotldr_bot/).
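A minimal sketch of such an extraction-based summarizer (the tiny inline
stop word set below stands in for the full Snowball list linked above):
    import re
    from collections import Counter

    STOP_WORDS = {"the", "a", "an", "of", "to", "and", "in", "that", "is",
                  "for", "on", "with", "are", "this", "be", "by", "it"}

    def summarize(text, n_sentences=2):
        # crude sentence split on terminal punctuation
        sentences = re.split(r"(?<=[.!?])\s+", text.strip())
        words = re.findall(r"[a-z']+", text.lower())
        freq = Counter(w for w in words if w not in STOP_WORDS)
        def score(s):
            # summed frequency of the sentence's non-stop words
            return sum(freq[w] for w in re.findall(r"[a-z']+", s.lower())
                       if w not in STOP_WORDS)
        return " ".join(sorted(sentences, key=score, reverse=True)[:n_sentences])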
# Example Input
Here's a paragraph that we want to summarize:
The purpose of this paper is to extend existing research on entrepreneurial team formation under
a competence-based perspective by empirically testing the influence of the sectoral context on
that dynamics. We use inductive, theory-building design to understand how different sectoral
characteristics moderate the influence of entrepreneurial opportunity recognition on subsequent
entrepreneurial team formation. A sample of 195 founders who teamed up in the nascent phase of
Interned-based and Cleantech sectors is analysed. The results suggest a twofold moderating effect
of the sectoral context. First, a technologically more challenging sector (i.e. Cleantech) demands
technically more skilled entrepreneurs, but at the same time, it requires still fairly
commercially experienced and economically competent individuals. Furthermore, the business context
also appears to exert an important influence on team formation dynamics: data reveals that
individuals are more prone to team up with co-founders possessing complementary know-how when they
are starting a new business venture in Cleantech rather than in the Internet-based sector.
Overall, these results stress how the business context cannot be ignored when analysing
entrepreneurial team formation dynamics by offering interesting insights on the matter to
prospective entrepreneurs and interested policymakers.
# Example Output
Here's a simple extraction-based summary of that paragraph, one of a few possible outputs:
Furthermore, the business context also appears to exert an important influence on team
formation dynamics: data reveals that individuals are more prone to team up with co-founders
possessing complementary know-how when they are starting a new business venture in Cleantech
rather than in the Internet-based sector.
# Challenge Input
This case describes the establishment of a new Cisco Systems R&D facility in Shanghai, China,
and the great concern that arises when a collaborating R&D site in the United States is closed
down. What will that closure do to relationships between the Shanghai and San Jose business
units? Will they be blamed and accused of replacing the U.S. engineers? How will it affect
other projects? The case also covers aspects of the site's establishment, such as securing an
appropriate building, assembling a workforce, seeking appropriate projects, developing
managers, building teams, evaluating performance, protecting intellectual property, and
managing growth. Suitable for use in organizational behavior, human resource management, and
strategy classes at the MBA and executive education levels, the material dramatizes the
challenges of changing a U.S.-based company into a global competitor.
""" |
# #!/usr/bin/env python
#
# """
# @package ion.agents.platform.test.test_platform_resource_monitor
# @file ion/agents/platform/test/test_platform_resource_monitor.py
# @author NAME
# @brief Unit test cases related to platform resource monitoring
# """
#
# __author__ = 'Carlos NAME'
# __license__ = 'Apache 2.0'
#
# #
# # bin/nosetests -v ion/agents/platform/test/test_platform_resource_monitor.py:Test.test_attr_grouping_by_similar_rate
# # bin/nosetests -v ion/agents/platform/test/test_platform_resource_monitor.py:Test.test_aggregation_for_granule
#
#
# from pyon.public import log
# from nose.plugins.attrib import attr
# from pyon.util.unit_test import IonUnitTestCase
#
# from ion.agents.platform.platform_resource_monitor import PlatformResourceMonitor
# from ion.agents.platform.util.network_util import NetworkUtil
#
# import pprint
#
#
# @attr('UNIT', group='sa')
# class Test(IonUnitTestCase):
# """
# @note These tests depend on definitions in network.yml
# """
#
# def setUp(self):
# self._pp = pprint.PrettyPrinter()
# yaml_filename = 'ion/agents/platform/rsn/simulator/network.yml'
# self._ndef = NetworkUtil.deserialize_network_definition(file(yaml_filename))
#
# def _get_attribute_values_dummy(self, attr_names, from_time):
# return {}
#
# def evt_recv(self, driver_event):
# log.debug('evt_recv: %s', driver_event)
#
# self._driver_event = driver_event
#
# def _get_attrs(self, platform_id):
# pnode = self._ndef.pnodes[platform_id]
#
# platform_attributes = dict((attr.attr_id, attr.defn) for attr
# in pnode.attrs.itervalues())
# log.debug("%r: platform_attributes: %s",
# platform_id, self._pp.pformat(platform_attributes))
# return platform_attributes
#
# def _verify_attr_grouping(self, platform_id, expected_groups):
# attrs = self._get_attrs(platform_id)
#
# prm = PlatformResourceMonitor(
# platform_id, attrs,
# self._get_attribute_values_dummy, self.evt_recv)
#
# groups = prm._group_by_monitoring_rate()
#
# log.debug("groups=\n%s", self._pp.pformat(groups))
#
# self.assertEquals(len(expected_groups), len(groups))
#
# for k in expected_groups:
# self.assertIn(k, groups)
# attr_ids = set(d['attr_id'] for d in groups[k])
# self.assertEquals(set(expected_groups[k]), attr_ids)
#
# def test_attr_grouping_by_similar_rate(self):
#
# self._verify_attr_grouping(
# platform_id="LJ01D",
# expected_groups={
# 2.5 : ["input_voltage|0"],
# 4.0 : ["MVPC_pressure_1|0", "MVPC_temperature|0"],
# 5.0 : ["input_bus_current|0"],
# }
# )
#
# self._verify_attr_grouping(
# platform_id="Node1D",
# expected_groups={
# 5.0 : ["input_voltage|0", "input_bus_current|0"],
# 10.0 : ["MVPC_pressure_1|0", "MVPC_temperature|0"],
# }
# )
#
# self._verify_attr_grouping(
# platform_id="MJ01C",
# expected_groups={
# 2.5 : ["input_voltage|0"],
# 5.0 : ["input_bus_current|0"],
# 4.0 : ["MVPC_pressure_1|0", "MVPC_temperature|0"],
# }
# )
#
# self._verify_attr_grouping(
# platform_id="ShoreStation",
# expected_groups={
# 5.0 : ["ShoreStation_attr_1|0", "ShoreStation_attr_2|0"],
# }
# )
#
# def test_aggregation_for_granule(self):
# platform_id = "LJ01D"
# attrs = self._get_attrs(platform_id)
#
# prm = PlatformResourceMonitor(
# platform_id, attrs,
# self._get_attribute_values_dummy, self.evt_recv)
#
# # set the buffers simulating retrieved data:
# prm._init_buffers()
# bufs = prm._buffers
# # each entry is a list of (val, ts) pairs:
# # note the missing pairs, which should be filled with (None, ts), see below
# bufs["input_voltage"] = [(1000, 9000), (1001, 9001), (1002, 9002)]
# bufs["input_bus_current"] = [(2000, 9000), (2002, 9002)]
# bufs["MVPC_temperature"] = [ (3000, 9001)]
#
# # run the key method under testing:
# prm._dispatch_publication()
#
# self.assertTrue(self._driver_event, "evt_recv callback must have been called")
# driver_event = self._driver_event
#
# # buffers must have been re-init'ed:
# for attr_id in attrs:
# self.assertEquals([], prm._buffers[attr_id])
#
# # this vals_dict is used by PlatformAgent to do the final creation of
# # the granule to be published
# vals_dict = driver_event.vals_dict
#
# # verify the attributes that must be present:
# self.assertIn("input_voltage", vals_dict)
# self.assertIn("input_bus_current", vals_dict)
# self.assertIn("MVPC_temperature", vals_dict)
#
# # verify the attribute that must *not* be present:
# # self.assertNotIn("MVPC_pressure_1", vals_dict)
#
# # verify the expected aligned values so they are on a common set of
# # timestamps:
#
# input_voltage = vals_dict["input_voltage"]
# input_bus_current = vals_dict["input_bus_current"]
# MVPC_temperature = vals_dict["MVPC_temperature"]
#
# self.assertEquals(
# [(1000, 9000), (1001, 9001), (1002, 9002)],
# input_voltage
# )
#
# # note the None entries that must have been created
#
# self.assertEquals(
# [(2000, 9000), (None, 9001), (2002, 9002)],
# input_bus_current
# )
#
# self.assertEquals(
# [(None, 9000), (3000, 9001), (None, 9002)],
# MVPC_temperature
# )
|
"""
PartedL - command ``parted -l``
===============================
This module provides processing for the ``parted`` command. The output is parsed
by the ``PartedL`` class. Attributes are provided for each field for the disk,
and a list of ``Partition`` class objects, one for each partition in the output.
Typical content of the ``parted -l`` command output
looks like::
Model: ATA TOSHIBA MG04ACA4 (scsi)
Disk /dev/sda: 4001GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: pmbr_boot
Number Start End Size File system Name Flags
1 1049kB 2097kB 1049kB bios_grub
2 2097kB 526MB 524MB xfs
3 526MB 4001GB 4000GB lvm
The columns may vary depending upon the type of device.
Note:
The examples in this module may be executed with the following command:
``python -m insights.parsers.parted``
Examples:
>>> parted_data = '''
... Model: ATA TOSHIBA MG04ACA4 (scsi)
... Disk /dev/sda: 4001GB
... Sector size (logical/physical): 512B/512B
... Partition Table: gpt
... Disk Flags: pmbr_boot
...
... Number Start End Size File system Name Flags
... 1 1049kB 2097kB 1049kB bios_grub
... 2 2097kB 526MB 524MB xfs
... 3 526MB 4001GB 4000GB lvm
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {PartedL: PartedL(context_wrap(parted_data))}
>>> parted_info = shared[PartedL]
>>> parted_info.data
{'partition_table': 'gpt', 'sector_size': '512B/512B', 'disk_flags': 'pmbr_boot', 'partitions': [{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}, {'start': '2097kB', 'size': '524MB', 'end': '526MB', 'number': '2', 'file_system': 'xfs'}, {'end': '4001GB', 'name': 'lvm', 'number': '3', 'start': '526MB', 'flags': 'lvm', 'file_system': 'lvm', 'size': '4000GB'}], 'model': 'ATA TOSHIBA MG04ACA4 (scsi)', 'disk': '/dev/sda', 'size': '4001GB'}
>>> parted_info.data['model']
'ATA TOSHIBA MG04ACA4 (scsi)'
>>> parted_info.disk
'/dev/sda'
>>> parted_info.logical_sector_size
'512B'
>>> parted_info.physical_sector_size
'512B'
>>> parted_info.boot_partition
>>> parted_info.data['disk_flags']
'pmbr_boot'
>>> len(parted_info.partitions)
3
>>> parted_info.partitions[0].data
{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}
>>> parted_info.partitions[0].number
'1'
>>> parted_info.partitions[0].start
'1049kB'
>>> parted_info.partitions[0].end
'2097kB'
>>> parted_info.partitions[0].size
'1049kB'
>>> parted_info.partitions[0].file_system
'bios_grub'
>>> parted_info.partitions[0].type
>>> parted_info.partitions[0].flags
'bios_grub'
""" |
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
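For example, a short reading sketch ('sound.aiff' is a placeholder name):
    import aifc
    f = aifc.open('sound.aiff', 'r')
    params = f.getparams()      # (nchannels, sampwidth, framerate, ...)
    data = f.readframes(1024)   # raw bytes for at most 1024 frames
    f.close()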
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
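And a short writing sketch (one second of 8 kHz mono silence; the file
name and parameter values are arbitrary):
    import aifc
    g = aifc.open('out.aiff', 'w')
    g.setnchannels(1)
    g.setsampwidth(2)                  # bytes per sample
    g.setframerate(8000)
    g.writeframes(b'\x00\x00' * 8000)
    g.close()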
""" |
#
# ElementTree
# $Id: ElementTree.py 3276 2007-09-12 06:52:30Z USERNAME $
#
# light-weight XML support for Python 2.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2004-09-03 fl made Element class visible; removed factory
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
#
# Copyright (c) 1999-2007 by NAME. All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
"""
============
Array basics
============
Array types and conversions between types
=========================================
Numpy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
========== ==========================================================
Data type Description
========== ==========================================================
bool_ Boolean (True or False) stored as a byte
int_ Default integer type (same as C ``long``; normally either
``int64`` or ``int32``)
intc Identical to C ``int`` (normally ``int32`` or ``int64``)
intp Integer used for indexing (same as C ``ssize_t``; normally
either ``int32`` or ``int64``)
int8 Byte (-128 to 127)
int16 Integer (-32768 to 32767)
int32 Integer (-2147483648 to 2147483647)
int64 Integer (-9223372036854775808 to 9223372036854775807)
uint8 Unsigned integer (0 to 255)
uint16 Unsigned integer (0 to 65535)
uint32 Unsigned integer (0 to 4294967295)
uint64 Unsigned integer (0 to 18446744073709551615)
float_ Shorthand for ``float64``.
float16 Half precision float: sign bit, 5 bits exponent,
10 bits mantissa
float32 Single precision float: sign bit, 8 bits exponent,
23 bits mantissa
float64 Double precision float: sign bit, 11 bits exponent,
52 bits mantissa
complex_ Shorthand for ``complex128``.
complex64 Complex number, represented by two 32-bit floats (real
and imaginary components)
complex128 Complex number, represented by two 64-bit floats (real
and imaginary components)
========== ==========================================================
Additionally to ``intc`` the platform dependent C integer types ``short``,
``long``, ``longlong`` and their unsigned versions are defined.
Numpy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
::
>>> import numpy as np
the dtypes are available as ``np.bool_``, ``np.float32``, etc.
Advanced types, not listed in the table above, are explored in
section :ref:`structured_arrays`.
There are 5 basic numerical types representing booleans (bool), integers (int),
unsigned integers (uint), floating point (float) and complex. Those with numbers
in their name indicate the bitsize of the type (i.e. how many bits are needed
to represent a single value in memory). Some types, such as ``int`` and
``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
vs. 64-bit machines). This should be taken into account when interfacing
with low-level code (such as C or Fortran) where the raw memory is addressed.
Data-types can be used as functions to convert python numbers to array scalars
(see the array scalar section for an explanation), python sequences of numbers
to arrays of that type, or as arguments to the dtype keyword that many numpy
functions or methods accept. Some examples::
>>> import numpy as np
>>> x = np.float32(1.0)
>>> x
1.0
>>> y = np.int_([1,2,4])
>>> y
array([1, 2, 4])
>>> z = np.arange(3, dtype=np.uint8)
>>> z
array([0, 1, 2], dtype=uint8)
Array types can also be referred to by character codes, mostly to retain
backward compatibility with older packages such as Numeric. Some
documentation may still refer to these, for example::
>>> np.array([1, 2, 3], dtype='f')
array([ 1., 2., 3.], dtype=float32)
We recommend using dtype objects instead.
To convert the type of an array, use the .astype() method (preferred) or
the type itself as a function. For example: ::
>>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2.])
>>> np.int8(z)
array([0, 1, 2], dtype=int8)
Note that, above, we use the *Python* float object as a dtype. NumPy knows
that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
The other data-types do not have Python equivalents.
To determine the type of an array, look at the dtype attribute::
>>> z.dtype
dtype('uint8')
dtype objects also contain information about the type, such as its bit-width
and its byte-order. The data type can also be used indirectly to query
properties of the type, such as whether it is an integer::
>>> d = np.dtype(int)
>>> d
dtype('int32')
>>> np.issubdtype(d, int)
True
>>> np.issubdtype(d, float)
False
Array Scalars
=============
Numpy generally returns elements of arrays as array scalars (a scalar
with an associated dtype). Array scalars differ from Python scalars, but
for the most part they can be used interchangeably (the primary
exception is for versions of Python older than v2.x, where integer array
scalars cannot act as indices for lists and tuples). There are some
exceptions, such as when code requires very specific attributes of a scalar
or when it checks specifically whether a value is a Python scalar. Generally,
problems are easily fixed by explicitly converting array scalars
to Python scalars, using the corresponding Python type function
(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
The primary advantage of using array scalars is that
they preserve the array type (Python may not have a matching scalar type
available, e.g. ``int16``). Therefore, the use of array scalars ensures
identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
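For example::
    >>> s = np.float32(1.5)     # an array scalar
    >>> s.dtype
    dtype('float32')
    >>> float(s)                # explicit conversion to a Python scalar
    1.5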
""" |
"""
PHYLIP multiple sequence alignment format (:mod:`skbio.io.format.phylip`)
=========================================================================
.. currentmodule:: skbio.io.format.phylip
The PHYLIP file format stores a multiple sequence alignment. The format was
originally defined and used in NAME PHYLIP package [1]_, and has
since been supported by several other bioinformatics tools (e.g., RAxML [2]_).
See [3]_ for the original format description, and [4]_ and [5]_ for additional
descriptions.
An example PHYLIP-formatted file taken from [3]_::
5 42
Turkey AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
Salmo gairAAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
H. SapiensACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
Chimp AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
Gorilla AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
.. note:: Original copyright notice for the above PHYLIP file:
*(c) Copyright 1986-2008 by The University of Washington. Written by NAME.
Permission is granted to copy this document provided that no
fee is charged for it and that this copyright notice is not removed.*
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.alignment.Alignment` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
PHYLIP format is a plain text format containing exactly two sections: a header
describing the dimensions of the alignment, followed by the multiple sequence
alignment itself.
The format described here is "strict" PHYLIP, as described in [4]_. Strict
PHYLIP requires that each sequence identifier is exactly 10 characters long
(padded with spaces as necessary). Other bioinformatics tools (e.g., RAxML) may
relax this rule to allow for longer sequence identifiers. See the
**Alignment Section** below for more details.
The format described here is "sequential" format. The original PHYLIP format
specification [3]_ describes both sequential and interleaved formats.
.. note:: scikit-bio currently only supports writing strict, sequential
PHYLIP-formatted files from an ``skbio.alignment.Alignment``. It does not
yet support reading PHYLIP-formatted files, nor does it support relaxed or
interleaved PHYLIP formats.
Header Section
^^^^^^^^^^^^^^
The header consists of a single line describing the dimensions of the
alignment. It **must** be the first line in the file. The header consists of
optional spaces, followed by two positive integers (``n`` and ``m``) separated
by one or more spaces. The first integer (``n``) specifies the number of
sequences (i.e., the number of rows) in the alignment. The second integer
(``m``) specifies the length of the sequences (i.e., the number of columns) in
the alignment. The smallest supported alignment dimensions are 1x1.
.. note:: scikit-bio will write the PHYLIP format header *without* preceding
spaces, and with only a single space between ``n`` and ``m``.
PHYLIP format *does not* support blank line(s) between the header and the
alignment.
Alignment Section
^^^^^^^^^^^^^^^^^
The alignment section immediately follows the header. It consists of ``n``
lines (rows), one for each sequence in the alignment. Each row consists of a
sequence identifier (ID) and characters in the sequence, in fixed width format.
The sequence ID can be up to 10 characters long. IDs less than 10 characters
must have spaces appended to them to reach the 10 character fixed width. Within
an ID, all characters except newlines are supported, including spaces,
underscores, and numbers.
.. note:: While not explicitly stated in the original PHYLIP format
description, scikit-bio only supports writing unique sequence identifiers
(i.e., duplicates are not allowed). Uniqueness is required because an
``skbio.alignment.Alignment`` cannot be created with duplicate IDs.
scikit-bio supports the empty string (``''``) as a valid sequence ID. An
empty ID will be padded with 10 spaces.
Sequence characters immediately follow the sequence ID. They *must* start at
the 11th character in the line, as the first 10 characters are reserved for the
sequence ID. While PHYLIP format does not explicitly restrict the set of
supported characters that may be used to represent a sequence, the original
format description [3]_ specifies the IUPAC nucleic acid lexicon for DNA or RNA
sequences, and the IUPAC protein lexicon for protein sequences. The original
PHYLIP specification uses ``-`` as a gap character, though older versions also
supported ``.``. The sequence characters may contain optional spaces (e.g., to
improve readability), and both upper and lower case characters are supported.
.. note:: scikit-bio will write a PHYLIP-formatted file even if the alignment's
sequence characters are not valid IUPAC characters. This differs from the
PHYLIP specification, which states that a PHYLIP-formatted file can only
contain valid IUPAC characters. To check whether all characters are valid
before writing, the user can call ``Alignment.is_valid()``.
Since scikit-bio supports both ``-`` and ``.`` as gap characters (e.g., in
``skbio.alignment.Alignment``), both are supported when writing a
PHYLIP-formatted file.
When writing a PHYLIP-formatted file, scikit-bio will split up each sequence
into chunks that are 10 characters long. Each chunk will be separated by a
single space. The sequence will always appear on a single line (sequential
format). It will *not* be wrapped across multiple lines. Sequences are
chunked in this manner for improved readability, and because most example
PHYLIP files are chunked in a similar way (e.g., see the example file
above). Note that this chunking is not required by the PHYLIP format.
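This chunking is easy to reproduce outside of scikit-bio. A minimal sketch
(the helper ``phylip_row`` is hypothetical, not part of the scikit-bio API)
that pads an ID to 10 characters and splits the sequence into
space-separated 10-character chunks: ::
>>> def phylip_row(seq_id, seq):
...     chunks = [seq[i:i + 10] for i in range(0, len(seq), 10)]
...     return '%-10s%s' % (seq_id, ' '.join(chunks))
>>> phylip_row('seq1', 'ACCGTTGTA-GTAGCT')
'seq1      ACCGTTGTA- GTAGCT'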
Examples
--------
Let's create an alignment with three DNA sequences of equal length:
>>> from skbio import Alignment, DNA
>>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id':'seq1'}),
... DNA('A--GTCGAA-GTACCT', metadata={'id':'sequence-2'}),
... DNA('AGAGTTGAAGGTATCT', metadata={'id':'3'})]
>>> aln = Alignment(seqs)
>>> aln
<Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
Now let's write the alignment to file in PHYLIP format, and take a look at the
output:
>>> from io import StringIO
>>> fh = StringIO()
>>> print(aln.write(fh, format='phylip').getvalue())
3 16
seq1 ACCGTTGTA- GTAGCT
sequence-2A--GTCGAA- GTACCT
3 AGAGTTGAAG GTATCT
<BLANKLINE>
>>> fh.close()
Notice that the 16-character sequences were split into two chunks, and that
each sequence appears on a single line (sequential format). Also note that each
sequence ID is padded with spaces to 10 characters in order to produce a fixed
width column.
If the sequence IDs in an alignment surpass the 10-character limit, an error
will be raised when we try to write a PHYLIP file:
>>> long_id_seqs = [DNA('ACCGT', metadata={'id':'seq1'}),
... DNA('A--GT', metadata={'id':'long-sequence-2'}),
... DNA('AGAGT', metadata={'id':'seq3'})]
>>> long_id_aln = Alignment(long_id_seqs)
>>> fh = StringIO()
>>> long_id_aln.write(fh, format='phylip')
Traceback (most recent call last):
...
skbio.io._exception.PhylipFormatError: Alignment can only be written in \
PHYLIP format if all sequence IDs have 10 or fewer characters. Found sequence \
with ID 'long-sequence-2' that exceeds this limit. Use Alignment.update_ids \
to assign shorter IDs.
>>> fh.close()
One way to work around this is to update the IDs to be shorter. The recommended
way of accomplishing this is via ``Alignment.update_ids``, which provides a
flexible way of creating a new ``Alignment`` with updated IDs. For example, to
remap each of the IDs to integer-based IDs:
>>> short_id_aln, _ = long_id_aln.update_ids()
>>> short_id_aln.ids()
['1', '2', '3']
We can now write the new alignment in PHYLIP format:
>>> fh = StringIO()
>>> print(short_id_aln.write(fh, format='phylip').getvalue())
3 5
1 ACCGT
2 A--GT
3 AGAGT
<BLANKLINE>
>>> fh.close()
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip.html
.. [2] "RAxML Version 8: A tool for Phylogenetic Analysis and
Post-Analysis of Large Phylogenies". Bioinformatics, 2014.
.. [3] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
.. [4] http://www.phylo.org/tools/obsolete/phylip.html
.. [5] http://www.bioperl.org/wiki/PHYLIP_multiple_alignment_format
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
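Example (a minimal usage sketch, not from the original documentation; the
section and option names are invented):
import configparser
parser = configparser.ConfigParser()
parser.read_string("[server]\nhost = localhost\nport = 8080\n")
parser.get('server', 'host')       # -> 'localhost'
parser.getint('server', 'port')    # -> 8080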
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True, default_section='DEFAULT',
interpolation=<unset>, converters=<unset>):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
When `default_section' is given, the special section is named
accordingly. By default it is called ``"DEFAULT"`` but this can
be customized to point to any other valid section name. Its current
value can be retrieved using the ``parser_instance.default_section``
attribute and may be modified at runtime.
When `interpolation` is given, it should be an Interpolation subclass
instance. It will be used as the handler for option value
pre-processing when using getters. RawConfigParser objects don't do
any sort of interpolation, whereas ConfigParser uses an instance of
BasicInterpolation. The library also provides a ``zc.buildout``
inspired ExtendedInterpolation implementation.
When `converters` is given, it should be a dictionary where each key
represents the name of a type converter and each value is a callable
implementing the conversion from string to the desired datatype. Every
converter gets its corresponding get*() method on the parser object and
section proxies.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
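Example of the `converters' hook described above (a minimal sketch; the
converter name 'list' and the option values are invented):
import configparser
parser = configparser.ConfigParser(
    converters={'list': lambda v: [s.strip() for s in v.split(',')]})
parser.read_string("[main]\nhosts = alpha, beta, gamma\n")
parser.getlist('main', 'hosts')    # -> ['alpha', 'beta', 'gamma']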
""" |
"""
backprop_magnitude_nabla
~~~~~~~~~~~~~~~~~~~~~~~~
Using backprop2 I constructed a 784-30-30-30-30-30-10 network to classify
MNIST data. I ran ten mini-batches of size 100, with eta = 0.01 and
lambda = 0.05, using:
net.SGD(otd[:1000], 1, 100, 0.01, 0.05, ...)
I obtained the following norms for the (unregularized) nabla_w for the
respective mini-batches:
[0.90845722175923671, 2.8852730656073566, 10.696793986223632, 37.75701921183488, 157.7365422527995, 304.43990075227839]
[0.22493835119537842, 0.6555126517964851, 2.6036801277234076, 11.408825365731225, 46.882319190445472, 70.499637502698221]
[0.11935180022357521, 0.19756069137133489, 0.8152794148335869, 3.4590802543293977, 15.470507965493903, 31.032396017142556]
[0.15130005837653659, 0.39687135985664701, 1.4810006139254532, 4.392519005642268, 16.831939776937311, 34.082104455938733]
[0.11594085276308999, 0.17177668061395848, 0.72204558746599512, 3.05062409378366, 14.133001132214286, 29.776204839994385]
[0.10790389807606221, 0.20707152756018626, 0.96348134037828603, 3.9043824079499561, 15.986873430586924, 39.195258080490895]
[0.088613291101645356, 0.129173436407863, 0.4242933114455002, 1.6154682713449411, 7.5451567587160069, 20.180545544006566]
[0.086175380639289575, 0.12571016850457151, 0.44231149185805047, 1.8435833504677326, 7.61973813981073, 19.474539356281781]
[0.095372080184163904, 0.15854489503205446, 0.70244235144444678, 2.6294803575724157, 10.427062019753425, 24.309420272033819]
[0.096453131000155692, 0.13574642196947601, 0.53551377709415471, 2.0247466793066895, 9.4503978546018068, 21.73772148470092]
Note that results are listed in order of layer. They clearly show how
the magnitude of nabla_w decreases as we go back through layers.
In this program I take mini-batches 7, 8, 9 as representative and plot
them. I omit the results from the first and final layers since they
correspond to 784 input neurons and 10 output neurons, not 30 as in
the other layers, making it difficult to compare results.
Note that I haven't attempted to preserve the whole workflow here. It
involved some minor hacking around with backprop2, which messed up
that code. That's why I've simply put the results in by hand below.
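For reference, a minimal sketch of how per-layer norms like those above
can be computed (assuming nabla_w is the list of per-layer weight-gradient
arrays returned by the backprop routine; np.linalg.norm of a matrix
defaults to the Frobenius norm):
import numpy as np
norms = [np.linalg.norm(nw) for nw in nabla_w]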
""" |
#!/usr/bin/python
#FILE DESCRIPTION=======================================================
#~ Python script used for post-processing automatization of the rivulet
#~ flow down the experimental plate in Freiberg
#~ PROGRAM STRUCTURE:
#~ 1. Identify problems in data (caused by parasitic currents)
#~ - find yCoordMax and zCoordMax
#~ 2. Remove the problems
#~ - create isosurface int2Store (prepared gas-liquid interface)
#~ 3. Prepare output variables
#~ - Sgl, area of the gas-liquid interface
#~ - IntegrateVariables filter on int2Store, Cell Data
#~ - aLaRVec, rivulet width (on l level) (aL + aR)
#~ - Slice filter on int2Store, z = l + calculator, abs(coordsY)
#~ - data mining and further manipulation (sum in tuples)
#~ - h0Vec, rivulet height at centerline (y = 0)
#~ - Slice filter on int2Store, y = 0
#~ - data mining, no further manipulation needed
#~ - epsVec = h0Vec./aVec (there should be the same number of elements)
#~ NOTE: Same manipulations as for aVec and h0Vec were already
#~ performed during the data problems identification
#~ NOTES:
#~ - before running, the main file has to be highlighted
#~ - can be run from the command line
#~ - still unfinished, but an improvement
#~ - colors the rivulet by its hFun (same as in exp Data)
#~ - coloring, <0,1.93> mm (corresponds to calibration cell)
#~ USAGE:
#~ paraFoam --script=./rivuletPostProcSaveData.py
#~ OUTPUT:
#~ modified row in iF_scalDataFile
#~ ... to the appropriate row of the above-specified file, append
#~ Sgl, ReM
#~ postProcVecs_iF_caseSpec
#~ ... file containing the vector outputs that I am interested in
#~ aLaRVec,h0Vec,epsVec,betaLVec,betaRVec
#~ File content and structure:
#~ Q0 = NUM (in m3/s)
#~ Sgl = NUM (in m2)
#~ !!ReM = NUM (in 1)!! (NOT YET IMPLEMENTED)
#~ aLaRVec = [[x0,aL0 + aR0],...,[xN,aLN + aRN]]
#~ h0Vec = [[x0,h00],...,[xN,h0N]]
#~ epsVec = [[x0,eps0],...,[xN,epsN]]
#~ betaVec = [[x0,beta0],...,[xN,betaN]]
#~ ---------------------------------------------------------
#~ Q0 ... liquid volumetric flow rate
#~ Sgl ... rivulet gas-liquid interface area
#~ ReM ... maximal liquid Reynolds number in rivulet
#~ aLaRVec ... width of the rivulet at z = l
#~ h0Vec ... height of the rivulet at y = 0
#~ epsVec ... ratio of rivulet height to its width (thin film approx)
#~ betaVec ... dynamic contact angles at y = a (smoothed)
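#~ EXAMPLE (illustrative sketch only, not part of the original workflow;
#~ variable names follow the descriptions above): once h0Vec and aLaRVec
#~ are assembled as [[x, value], ...] lists, epsVec can be formed with
#~ plain numpy:
#~
#~ import numpy as np
#~ h0   = np.array(h0Vec)                      # columns: x, h0
#~ aLaR = np.array(aLaRVec)                    # columns: x, aL + aR
#~ epsVec = np.column_stack((h0[:, 0], h0[:, 1]/aLaR[:, 1])).tolist()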
#LICENSE================================================================
# rivuletPostProcSaveData.py
#
# Copyright 2015 NAME <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#=======================================================================
# PARAMETERS
|
"""
==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] == x[0][2]``, though the second case is less
efficient: a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. Numpy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
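A quick way to see the difference is to flatten a small array in each
order (a minimal illustration): ::
>>> a = np.arange(6).reshape(2,3)
>>> a.ravel(order='C')    # last index varies fastest
array([0, 1, 2, 3, 4, 5])
>>> a.ravel(order='F')    # first index varies fastest (Fortran/IDL style)
array([0, 3, 1, 4, 2, 5])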
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrates best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
Numpy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 produces an
array of length 4 (the same as the index array) where each index is
replaced by the corresponding value of the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are within the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
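A toy version of such a lookup (the table values here are invented): ::
>>> lut = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]], dtype=np.uint8)
>>> image = np.array([[0, 1], [2, 1]], dtype=np.uint8)
>>> lut[image].shape      # (ny, nx) image -> (ny, nx, 3) RGB
(2, 2, 3)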
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
The result is a 1-D array containing all the elements in the indexed
array corresponding to all the true elements in the boolean array. As
with index arrays, what is returned is a copy of the data, not a view
as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is because
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
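For example (a small illustration): ::
>>> a = np.arange(10).reshape(2, 5)
>>> np.where(a > 7)       # a tuple of index arrays
(array([1, 1]), array([3, 4]))
>>> a[np.where(a > 7)]
array([8, 9])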
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
""" |
"""
=================
Structured Arrays
=================
Introduction
============
Numpy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant: ::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2, 3.0, 'World')
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g. H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
Using strings to define the record structure precludes naming the
fields in the original definition. The names can be changed as shown
later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for.::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
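For instance, a field that happens to be called "shape" is shadowed by the
``ndarray.shape`` attribute (an illustrative case): ::
>>> rec = np.rec.array([(1, 2)], dtype=[('shape', 'i4'), ('val', 'i4')])
>>> rec.shape             # the ndarray attribute wins
(1,)
>>> rec['shape']          # the field remains reachable by index
array([1], dtype=int32)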
""" |
"""Test module for the noddy examples
Noddy 1:
>>> import noddy
>>> n1 = noddy.Noddy()
>>> n2 = noddy.Noddy()
>>> del n1
>>> del n2
Noddy 2
>>> import noddy2
>>> n1 = noddy2.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'NAME'
>>> n1.number
42
>>> n1.name()
'jim NAME'
>>> n1.first = 'will'
>>> n1.name()
'will NAME'
>>> n1.last = 'NAME'
>>> n1.name()
'will NAME'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME'
>>> n2 = noddy2.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy2.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 3
>>> import noddy3
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1.name()
'jim NAME'
>>> del n1.first
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: Cannot delete the first attribute
>>> n1.first = 42
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: The first attribute value must be a string
>>> n1.first = 'will'
>>> n1.name()
'will NAME'
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n3 = noddy3.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 4
>>> import noddy4
>>> n1 = noddy4.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'NAME'
>>> n1.number
42
>>> n1.name()
'jim NAME'
>>> n1.first = 'will'
>>> n1.name()
'will NAME'
>>> n1.last = 'NAME'
>>> n1.name()
'will NAME'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME'
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy4.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
Test cyclic gc(?)
>>> import gc
>>> gc.disable()
>>> x = []
>>> l = [x]
>>> n2.first = l
>>> n2.first
[[]]
>>> l.append(n2)
>>> del l
>>> del n1
>>> del n2
>>> sys.getrefcount(x)
3
>>> ignore = gc.collect()
>>> sys.getrefcount(x)
2
>>> gc.enable()
""" |
""" Layered, composite rendering for TileStache.
The Sandwich Provider supplies a Photoshop-like rendering pipeline, making it
possible to use the output of other configured tile layers as layers or masks
to create a combined output. Sandwich is modeled on Lars Ahlzen's TopOSM.
The external "Blit" library is required by Sandwich, and can be installed
via Pip, easy_install, or directly from Github:
https://github.com/migurski/Blit
The "stack" configuration parameter describes a layer or stack of layers that
can be combined to create output. A simple stack that merely outputs a single
color orange tile looks like this:
{"color" "#ff9900"}
Other layers in the current TileStache configuration can be referenced by name,
as in this example stack that simply echoes another layer:
{"src": "layer-name"}
Bitmap images can also be referenced by local filename or URL, and will be
tiled seamlessly, assuming 256x256 parent tiles:
{"src": "image.png"}
{"src": "http://example.com/image.png"}
Layers can be limited to appear at certain zoom levels, given either as a range
or as a single number:
{"src": "layer-name", "zoom": "12"}
{"src": "layer-name", "zoom": "12-18"}
Layers can also be used as masks, as in this example that uses one layer
to mask another layer:
{"mask": "layer-name", "src": "other-layer"}
Many combinations of "src", "mask", and "color" can be used together, but it's
an error to provide all three.
Layers can be combined through the use of opacity and blend modes. Opacity is
specified as a value from 0.0-1.0, and blend mode is specified as a string.
This example layer is blended using the "hard light" mode at 50% opacity:
{"src": "hillshading", "mode": "hard light", "opacity": 0.5}
Currently-supported blend modes include "screen", "add", "multiply", "subtract",
"linear light", and "hard light".
Layers can also be affected by adjustments. Adjustments are specified as an
array of names and parameters. This example layer has been slightly darkened
using the "curves" adjustment, moving the input value of 181 (light gray)
to 50% gray while leaving black and white alone:
{"src": "hillshading", "adjustments": [ ["curves", [0, 181, 255]] ]}
Available adjustments:
"threshold" - Blit.adjustments.threshold()
"curves" - Blit.adjustments.curves()
"curves2" - Blit.adjustments.curves2()
See detailed information about adjustments in Blit documentation:
https://github.com/migurski/Blit#readme
Finally, the stacking feature allows layers to be combined in more complex ways.
This example stack combines a background color and foreground layer:
[
{"color": "#ff9900"},
{"src": "layer-name"}
]
A complete example configuration might look like this:
{
"cache":
{
"name": "Test"
},
"layers":
{
"base":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-base.xml"}
},
"halos":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-halos.xml"},
"metatile": {"buffer": 128}
},
"outlines":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-outlines.xml"},
"metatile": {"buffer": 16}
},
"streets":
{
"provider": {"name": "mapnik", "mapfile": "mapnik-streets.xml"},
"metatile": {"buffer": 128}
},
"sandwiches":
{
"provider":
{
"name": "Sandwich",
"stack":
[
{"src": "base"},
{"src": "outlines", "mask": "halos"},
{"src": "streets"}
]
}
}
}
}
""" |
"""Test module for the noddy examples
Noddy 1:
>>> import noddy
>>> n1 = noddy.Noddy()
>>> n2 = noddy.Noddy()
>>> del n1
>>> del n2
Noddy 2
>>> import noddy2
>>> n1 = noddy2.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'NAME n1.number
42
>>> n1.name()
'jim NAME n1.first = 'will'
>>> n1.name()
'will NAME n1.last = 'NAME n1.name()
'will NAME del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME n2 = noddy2.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy2.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 3
>>> import noddy3
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1.name()
'jim NAME del n1.first
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: Cannot delete the first attribute
>>> n1.first = 42
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: The first attribute value must be a string
>>> n1.first = 'will'
>>> n1.name()
'will NAME n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n3 = noddy3.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 4
>>> import noddy4
>>> n1 = noddy4.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'NAME n1.number
42
>>> n1.name()
'jim NAME n1.first = 'will'
>>> n1.name()
'will NAME n1.last = 'NAME n1.name()
'will NAME del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy4.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
Test cyclic gc(?)
>>> import gc
>>> gc.disable()
>>> x = []
>>> l = [x]
>>> n2.first = l
>>> n2.first
[[]]
>>> l.append(n2)
>>> del l
>>> del n1
>>> del n2
>>> sys.getrefcount(x)
3
>>> ignore = gc.collect()
>>> sys.getrefcount(x)
2
>>> gc.enable()
""" |
#!/usr/bin/env python
# Library for treating FITS files as Python Imaging Library objects
# Copyright (c) 2005, 2006, 2007, NAME
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Changelog:
#
# 3/31/08 Fixed overflow errors that were occurring when zscale_range was
# returning weird types for zmin and zmax. Now we force zmin & zmax
# to be of builtin type float for safety.
#
# 10/17/07 Added manual range selection to FitsImage. Fixed typecode for
# numpy to use unsigned 8 bit integers.
#
# 9/25/07 Added call to fits_simple_verify() to verify input file is FITS.
# Removed kwargs from FitsImage() because pyfits doesn't use them.
#
# 9/14/07 Changed array usage from Numeric to numpy. Changed underlying
# FITS I/O library from fitslib to pyfits. Modifications made
# by NAME.
#
# 8/20/07 Wrote arcsinh scaling algorithm and added scaling options.
# Updated documentation. Dropped number of channels check on
# color -- PIL should handle this instead.
#
# 8/17/07 Wrote new scaling algorithm, percentile_range(), that determines
# the range to use from a configurable percentile cut. Now
# FitsImage() takes optional named arguments to configure which
# contrast algorithm to use. In addition, keyword arguments are
# passed on to Fits() to configure how minor errors are handled.
#
# 7/4/07 Updated to use Numeric. Improved speed of zscale_range().
#
# 10/10/06 Increased accuracy of draw_circle().
#
# 2/7/06 Updated documentation.
#
# 1/4/06 Fixed bug in zscale_range() where num_points and num_pixels
# sometimes differed, resulting in the sigma iteration failing because
# the arrays would differ in length. Now the arrays are both of
# size num_pixels. Some additional checks were also added.
#
# 12/10/05 Updated documentation.
#
# 12/8/05 Now draw_circle will not draw points that lie outside of the image.
#
# 12/7/05 Wrote zscale_range() function which implements the ds9 zscale
# autocontrast algorithm for FITs images. Wrote a new version of
# asImage(), now called FitsImage(), that returns a PIL Image object
# without use of the convert commandline utility. Rewrote convert()
# and resize() methods so that they do not have to use the convert
# command externally. Removed all of the other asImage() methods
# that weren't working.
|
"""
imsize map_coordinates fourier_shift
50 0.016211 0.00944495
84 0.0397182 0.0161059
118 0.077543 0.0443089
153 0.132948 0.058187
187 0.191808 0.0953341
221 0.276543 0.12069
255 0.357552 0.182863
289 0.464547 0.26451
324 0.622776 0.270612
358 0.759015 0.713239
392 0.943339 0.441262
426 1.12885 0.976379
461 1.58367 1.26116
495 1.62482 0.824757
529 1.83506 1.19455
563 3.21001 2.82487
597 2.64892 2.23473
632 2.74313 2.21019
666 3.07002 2.49054
700 3.50138 1.59507
Fourier outperforms map_coordinates slightly. It wraps, though, while
map_coordinates in general does not.
With skimage:
imsize map_coordinates fourier_shift skimage
50 0.0154819 0.00862598 0.0100191
84 0.0373471 0.0164428 0.0299141
118 0.0771091 0.0555351 0.047652
153 0.128651 0.0582621 0.108211
187 0.275812 0.129408 0.17893
221 0.426893 0.177555 0.281367
255 0.571022 0.26866 0.354988
289 0.75541 0.412766 0.415558
324 1.02605 0.402632 0.617405
358 1.14151 0.975867 0.512207
392 1.51085 0.549434 0.904133
426 1.72907 1.28387 0.948763
461 2.03424 1.79091 1.09984
495 2.23595 0.976755 1.49104
529 2.59915 1.95115 1.47774
563 3.34082 3.03312 1.76769
597 3.43117 2.84357 2.67582
632 4.06516 4.19464 2.22102
666 6.22056 3.65876 2.39756
700 5.06125 2.00939 2.73733
The Fourier timings are all over the place, probably because of a strong
dependence on the primeness of the image size. Comparable to skimage in some
cases, though. A rough sketch of how such timings could be produced follows.
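A minimal benchmarking sketch under stated assumptions: the shift amount and
the timing loop count are arbitrary, and the skimage column would use its own
warp-based shift, omitted here. Only the two scipy calls below are from the
comparison above.

import timeit

import numpy as np
from scipy.ndimage import map_coordinates, fourier_shift

def shift_map_coordinates(image, dx, dy):
    # Shift by resampling the image at translated coordinates.
    yy, xx = np.indices(image.shape, dtype=float)
    return map_coordinates(image, [yy - dy, xx - dx], order=1)

def shift_fourier(image, dx, dy):
    # Shift via a phase ramp in the Fourier domain (this one wraps around).
    shifted = fourier_shift(np.fft.fftn(image), (dy, dx))
    return np.fft.ifftn(shifted).real

for imsize in (50, 84, 118):
    image = np.random.rand(imsize, imsize)
    t_map = timeit.timeit(lambda: shift_map_coordinates(image, 1.5, 2.5), number=10)
    t_fft = timeit.timeit(lambda: shift_fourier(image, 1.5, 2.5), number=10)
    print(imsize, t_map, t_fft)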
""" |
# -*- encoding: utf-8 -*-
# back ported from CPython 3
# A. HISTORY OF THE SOFTWARE
# ==========================
#
# Python was created in the early 1990s by NAME at Stichting
# Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
# as a successor of a language called ABC. NAME remains Python's
# principal author, although it includes many contributions from others.
#
# In 1995, NAME continued his work on Python at the Corporation for
# National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
# in Reston, Virginia where he released several versions of the
# software.
#
# In May 2000, NAME and the Python core development team moved to
# BeOpen.com to form the BeOpen PythonLabs team. In October of the same
# year, the PythonLabs team moved to Digital Creations (now Zope
# Corporation, see http://www.zope.com). In 2001, the Python Software
# Foundation (PSF, see http://www.python.org/psf/) was formed, a
# non-profit organization created specifically to own Python-related
# Intellectual Property. Zope Corporation is a sponsoring member of
# the PSF.
#
# All Python releases are Open Source (see http://www.opensource.org for
# the Open Source Definition). Historically, most, but not all, Python
# releases have also been GPL-compatible; the table below summarizes
# the various releases.
#
# Release Derived Year Owner GPL-
# from compatible? (1)
#
# 0.9.0 thru 1.2 1991-1995 CWI yes
# 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
# 1.6 1.5.2 2000 CNRI no
# 2.0 1.6 2000 BeOpen.com no
# 1.6.1 1.6 2001 CNRI yes (2)
# 2.1 2.0+1.6.1 2001 PSF no
# 2.0.1 2.0+1.6.1 2001 PSF yes
# 2.1.1 2.1+2.0.1 2001 PSF yes
# 2.2 2.1.1 2001 PSF yes
# 2.1.2 2.1.1 2002 PSF yes
# 2.1.3 2.1.2 2002 PSF yes
# 2.2.1 2.2 2002 PSF yes
# 2.2.2 2.2.1 2002 PSF yes
# 2.2.3 2.2.2 2003 PSF yes
# 2.3 2.2.2 2002-2003 PSF yes
# 2.3.1 2.3 2002-2003 PSF yes
# 2.3.2 2.3.1 2002-2003 PSF yes
# 2.3.3 2.3.2 2002-2003 PSF yes
# 2.3.4 2.3.3 2004 PSF yes
# 2.3.5 2.3.4 2005 PSF yes
# 2.4 2.3 2004 PSF yes
# 2.4.1 2.4 2005 PSF yes
# 2.4.2 2.4.1 2005 PSF yes
# 2.4.3 2.4.2 2006 PSF yes
# 2.4.4 2.4.3 2006 PSF yes
# 2.5 2.4 2006 PSF yes
# 2.5.1 2.5 2007 PSF yes
# 2.5.2 2.5.1 2008 PSF yes
# 2.5.3 2.5.2 2008 PSF yes
# 2.6 2.5 2008 PSF yes
# 2.6.1 2.6 2008 PSF yes
# 2.6.2 2.6.1 2009 PSF yes
# 2.6.3 2.6.2 2009 PSF yes
# 2.6.4 2.6.3 2009 PSF yes
# 2.6.5 2.6.4 2010 PSF yes
# 2.7 2.6 2010 PSF yes
#
# Footnotes:
#
# (1) GPL-compatible doesn't mean that we're distributing Python under
# the GPL. All Python licenses, unlike the GPL, let you distribute
# a modified version without making your changes open source. The
# GPL-compatible licenses make it possible to combine Python with
# other software that is released under the GPL; the others don't.
#
# (2) According to NAME 1.6.1 is not GPL-compatible,
# because its license has a choice of law clause. According to
# CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
# is "not incompatible" with the GPL.
#
# Thanks to the many outside volunteers who have worked under NAME's
# direction to make these releases possible.
#
#
# B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
# ===============================================================
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained
# in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
# -------------------------------------------
#
# BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
#
# 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
# office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
# Individual or Organization ("Licensee") accessing and otherwise using
# this software in source or binary form and its associated
# documentation ("the Software").
#
# 2. Subject to the terms and conditions of this BeOpen Python License
# Agreement, BeOpen hereby grants Licensee a non-exclusive,
# royalty-free, world-wide license to reproduce, analyze, test, perform
# and/or display publicly, prepare derivative works, distribute, and
# otherwise use the Software alone or in any derivative version,
# provided, however, that the BeOpen Python License is retained in the
# Software, alone or in any derivative version prepared by Licensee.
#
# 3. BeOpen is making the Software available to Licensee on an "AS IS"
# basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
# SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
# AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
# DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 5. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 6. This License Agreement shall be governed by and interpreted in all
# respects by the law of the State of California, excluding conflict of
# law provisions. Nothing in this License Agreement shall be deemed to
# create any relationship of agency, partnership, or joint venture
# between BeOpen and Licensee. This License Agreement does not grant
# permission to use BeOpen trademarks or trade names in a trademark
# sense to endorse or promote products or services of Licensee, or any
# third party. As an exception, the "BeOpen Python" logos available at
# http://www.pythonlabs.com/logos.html may be used according to the
# permissions granted on that web page.
#
# 7. By copying, installing or otherwise using the software, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
# ---------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Corporation for National
# Research Initiatives, having an office at 1895 Preston White Drive,
# Reston, VA 20191 ("CNRI"), and the Individual or Organization
# ("Licensee") accessing and otherwise using Python 1.6.1 software in
# source or binary form and its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, CNRI
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python 1.6.1
# alone or in any derivative version, provided, however, that CNRI's
# License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
# 1995-2001 Corporation for National Research Initiatives; All Rights
# Reserved" are retained in Python 1.6.1 alone or in any derivative
# version prepared by Licensee. Alternately, in lieu of CNRI's License
# Agreement, Licensee may substitute the following text (omitting the
# quotes): "Python 1.6.1 is made available subject to the terms and
# conditions in CNRI's License Agreement. This Agreement together with
# Python 1.6.1 may be located on the Internet using the following
# unique, persistent identifier (known as a handle): 1895.22/1013. This
# Agreement may also be obtained from a proxy server on the Internet
# using the following URL: http://hdl.handle.net/1895.22/1013".
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python 1.6.1 or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python 1.6.1.
#
# 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
# basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. This License Agreement shall be governed by the federal
# intellectual property law of the United States, including without
# limitation the federal copyright law, and, to the extent such
# U.S. federal law does not apply, by the law of the Commonwealth of
# Virginia, excluding Virginia's conflict of law provisions.
# Notwithstanding the foregoing, with regard to derivative works based
# on Python 1.6.1 that incorporate non-separable material that was
# previously distributed under the GNU General Public License (GPL), the
# law of the Commonwealth of Virginia shall govern this License
# Agreement only as to issues arising under or with respect to
# Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
# License Agreement shall be deemed to create any relationship of
# agency, partnership, or joint venture between CNRI and Licensee. This
# License Agreement does not grant permission to use CNRI trademarks or
# trade name in a trademark sense to endorse or promote products or
# services of Licensee, or any third party.
#
# 8. By clicking on the "ACCEPT" button where indicated, or by copying,
# installing or otherwise using Python 1.6.1, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
#
# ACCEPT
#
#
# CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
# --------------------------------------------------
#
# Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
# The Netherlands. All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Stichting Mathematisch
# Centrum or CWI not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
#############################################################################
# This script contains two trivial examples of simple "scripted step" classes.
# To fully understand how the lldb "Thread Plan" architecture works, read the
# comments at the beginning of ThreadPlan.h in the lldb sources. The python
# interface is a reduced version of the full internal mechanism, but captures
# most of the power with a much simpler interface.
#
# But I'll attempt a brief summary here.
# Stepping in lldb is done independently for each thread. Moreover, the stepping
# operations are stackable. So for instance if you did a "step over", and in
# the course of stepping over you hit a breakpoint, stopped and stepped again,
# the first "step-over" would be suspended, and the new step operation would
# be enqueued. Then if that step over caused the program to hit another breakpoint,
# lldb would again suspend the second step and return control to the user, so
# now there are two pending step overs. Etc. with all the other stepping
# operations. Then if you hit "continue" the bottom-most step-over would complete,
# and another continue would complete the first "step-over".
#
# lldb represents this system with a stack of "Thread Plans". Each time a new
# stepping operation is requested, a new plan is pushed on the stack. When the
# operation completes, it is popped off the stack.
#
# The bottom-most plan in the stack is the immediate controller of stepping;
# most importantly, when the process resumes, the bottom-most plan will get
# asked whether to set the program running freely, or to instruction-single-step
# the current thread. In the scripted interface, you indicate this by returning
# False or True respectively from the should_step method.
#
# Each time the process stops, the thread plan stack for each thread that stopped
# "for a reason" (i.e. a single-step completed on that thread, or a breakpoint
# was hit) is queried to determine how to proceed, starting from the most
# recently pushed plan, in two stages:
#
# 1) Each plan is asked if it "explains" the stop. The first plan to claim the
# stop wins. In scripted Thread Plans, this is done by returning True from
# the "explains_stop method. This is how, for instance, control is returned
# to the User when the "step-over" plan hits a breakpoint. The step-over
# plan doesn't explain the breakpoint stop, so it returns false, and the
# breakpoint hit is propagated up the stack to the "base" thread plan, which
# is the one that handles random breakpoint hits.
#
# 2) Then the plan that won the first round is asked if the process should stop.
# This is done in the "should_stop" method. The scripted plans actually do
# three jobs in should_stop:
# a) They determine if they have completed their job or not. If they have
# they indicate that by calling SetPlanComplete on their thread plan.
# b) They decide whether they want to return control to the user or not.
# They do this by returning True or False respectively.
# c) If they are not done, they set up whatever machinery they will use
# the next time the thread continues.
#
# Note that deciding to return control to the user, and deciding your plan
# is done, are orthogonal operations. You could set up the next phase of
# stepping, and then return True from should_stop, and when the user next
# "continued" the process your plan would resume control. Of course, the
# user might also "step-over" or some other operation that would push a
# different plan, which would take control till it was done.
#
# One other detail you should be aware of, if the plan below you on the
# stack was done, then it will be popped and the next plan will take control
# and its "should_stop" will be called.
#
# Note also, there should be another method called when your plan is popped,
# to allow you to do whatever cleanup is required. I haven't gotten to that
# yet. For now you should do that at the same time you mark your plan complete.
#
# 3) After the round of negotiation over whether to stop or not is done, all the
# plans get asked if they are "stale". If they are say they are stale
# then they will get popped. This question is asked with the "is_stale" method.
#
# This is useful, for instance, in the FinishPrintAndContinue plan. What might
# happen here is that after continuing but before the finish is done, the program
# could hit another breakpoint and stop. Then the user could use the step
# command repeatedly until they leave the frame of interest by stepping.
# In that case, the step plan is the one that will be responsible for stopping,
# and the finish plan won't be asked should_stop, it will just be asked if it
# is stale. In this case, if the step_out plan that the FinishPrintAndContinue
# plan is driving is stale, so is ours, and it is time to do our printing.
#
# Both examples show stepping through an address range for 20 bytes from the
# current PC. The first one does it by single stepping and checking a condition.
# It doesn't, however, handle the case where you step into another frame while
# still in the current range in the starting frame.
#
# That is better handled in the second example by using the built-in StepOverRange
# thread plan.
#
# To use these stepping modes, you would do:
#
# (lldb) command script import scripted_step.py
# (lldb) thread step-scripted -C scripted_step.SimpleStep
# or
#
# (lldb) thread step-scripted -C scripted_step.StepWithPlan
|
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float64)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well).
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly into numpy arrays, but which
are not hard to convert, are the many image formats (jpg, png, etc.) readable
through libraries like PIL.
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ASCII files can be read using the io package in scipy.
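For instance, a minimal sketch of reading a CSV of numbers with numpy itself
(the file name here is hypothetical): ::
>>> import numpy as np
>>> arr = np.loadtxt('data.csv', delimiter=',')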
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
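A minimal sketch of such a raw round trip with tofile()/fromfile() (the dtype,
shape, and file name are illustrative assumptions): ::
>>> a = np.arange(12, dtype=np.float64)
>>> a.tofile('raw.dat') # raw bytes, no header or shape information
>>> b = np.fromfile('raw.dat', dtype=np.float64).reshape(3, 4)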
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
""" |
"""
This page is in the table of contents.
Cool is a craft tool to cool the shape.
Cool works well with a stepper extruder; it does not work well with a DC motor extruder.
If enabled, before each layer that takes less than "Minimum Layer Time" to print, the tool head will orbit around the printed area for 'Minimum Layer Time' minus 'the time it takes to print the layer' before it starts printing the layer. This is a great way to let layers with smaller area cool before you start printing on top of them (so you do not overheat the area).
The cool manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Cool
Allan NAME, aka The Masked Retriever, has written the "Skeinforge Quicktip: Cool" at:
http://blog.thingiverse.com/2009/07/28/skeinforge-quicktip-cool/
==Operation==
The default 'Activate Cool' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Bridge Cool===
Default is one degree Celsius.
If the layer is a bridge layer, then cool will lower the temperature by 'Bridge Cool' degrees Celsius.
===Cool Type===
Default is 'Slow Down'.
====Orbit====
When selected, cool will add orbits with the extruder off to give the layer time to cool, so that the next layer is not extruded on a molten base. The orbits will be around the largest island on that layer. Orbit should only be chosen if you cannot upgrade to a stepper extruder.
====Slow Down====
When selected, cool will slow down the extruder so that it will take the minimum layer time to extrude the layer. DC motors do not operate properly at slow flow rates, so if you have a DC motor extruder, you should upgrade to a stepper extruder, but if you can't do that, you can try using the 'Orbit' option.
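The slowdown arithmetic is simple; a minimal sketch (the variable names are illustrative, not from the cool source):
>>> minimum_layer_time = 60.0 # the 'Minimum Layer Time' setting, in seconds
>>> layer_time = 24.0 # seconds this layer would take at full speed
>>> multiplier = min(1.0, layer_time / minimum_layer_time)
>>> multiplier # feed rates are scaled down by this factor
0.4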
===Maximum Cool===
Default is 2 degrees Celsius.
If it takes less time to extrude the layer than the minimum layer time, then cool will lower the temperature by the 'Maximum Cool' setting times the layer time over the minimum layer time.
===Minimum Layer Time===
Default is 60 seconds.
Defines the minimum amount of time the extruder will spend on a layer; this is an important setting.
===Minimum Orbital Radius===
Default is 10 millimeters.
When the orbit cool type is selected, if the area of the largest island is as large as the square of the "Minimum Orbital Radius" then the orbits will be just within the island. If the island is smaller, then the orbits will be in a square of the "Minimum Orbital Radius" around the center of the island. This is so that the hot extruder does not stay too close to small islands.
===Name of Alteration Files===
Cool looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Cool does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder. The cool start and end text idea is from:
http://makerhahn.blogspot.com/2008/10/yay-minimug.html
====Name of Cool End File====
Default is cool_end.gcode.
If there is a file with the name of the "Name of Cool End File" setting, it will be added to the end of the orbits.
====Name of Cool Start File====
Default is cool_start.gcode.
If there is a file with the name of the "Name of Cool Start File" setting, it will be added to the start of the orbits.
===Orbital Outset===
Default is 2 millimeters.
When the orbit cool type is selected, the orbits will be outset around the largest island by 'Orbital Outset' millimeters. If 'Orbital Outset' is negative, the orbits will be inset instead.
===Turn Fan On at Beginning===
Default is on.
When selected, cool will turn the fan on at the beginning of the fabrication by adding the M106 command.
===Turn Fan Off at Ending===
Default is on.
When selected, cool will turn the fan off at the ending of the fabrication by adding the M107 command.
==Examples==
The following examples cool the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and cool.py.
> python cool.py
This brings up the cool dialog.
> python cool.py Screw Holder Bottom.stl
The cool tool is parsing the file:
Screw Holder Bottom.stl
..
The cool tool has created the file:
.. Screw Holder Bottom_cool.gcode
""" |
"""
=============================
Subclassing ndarray in python
=============================
Credits
-------
This page is based with thanks on the wiki page on subclassing by NAME - http://www.scipy.org/Subclasses.
Introduction
------------
Subclassing ndarray is relatively simple, but it has some complications
compared to other Python objects. On this page we explain the machinery
that allows you to subclass ndarray, and the implications for
implementing a subclass.
ndarrays and object creation
============================
Subclassing ndarray is complicated by the fact that new instances of
ndarray classes can come about in three different ways. These are:
#. Explicit constructor call - as in ``MySubClass(params)``. This is
the usual route to Python instance creation.
#. View casting - casting an existing ndarray as a given subclass
#. New from template - creating a new instance from a template
instance. Examples include returning slices from a subclassed array,
creating return types from ufuncs, and copying arrays. See
:ref:`new-from-template` for more details
The last two are characteristics of ndarrays - in order to support
things like array slicing. The complications of subclassing ndarray are
due to the mechanisms numpy has to support these latter two routes of
instance creation.
.. _view-casting:
View casting
------------
*View casting* is the standard ndarray mechanism by which you take an
ndarray of any subclass, and return a view of the array as another
(specified) subclass:
>>> import numpy as np
>>> # create a completely useless ndarray subclass
>>> class C(np.ndarray): pass
>>> # create a standard ndarray
>>> arr = np.zeros((3,))
>>> # take a view of it, as our useless subclass
>>> c_arr = arr.view(C)
>>> type(c_arr)
<class 'C'>
.. _new-from-template:
Creating new from template
--------------------------
New instances of an ndarray subclass can also come about by a very
similar mechanism to :ref:`view-casting`, when numpy finds it needs to
create a new instance from a template instance. The most obvious place
this has to happen is when you are taking slices of subclassed arrays.
For example:
>>> v = c_arr[1:]
>>> type(v) # the view is of type 'C'
<class 'C'>
>>> v is c_arr # but it's a new instance
False
The slice is a *view* onto the original ``c_arr`` data. So, when we
take a view from the ndarray, we return a new ndarray, of the same
class, that points to the data in the original.
There are other points in the use of ndarrays where we need such views,
such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
(see also :ref:`array-wrap`), and reducing methods (like
``c_arr.mean()``).
Relationship of view casting and new-from-template
--------------------------------------------------
These paths both use the same machinery. We make the distinction here,
because they result in different input to your methods. Specifically,
:ref:`view-casting` means you have created a new instance of your array
type from any potential subclass of ndarray. :ref:`new-from-template`
means you have created a new instance of your class from a pre-existing
instance, allowing you - for example - to copy across attributes that
are particular to your subclass.
Implications for subclassing
----------------------------
If we subclass ndarray, we need to deal not only with explicit
construction of our array type, but also :ref:`view-casting` or
:ref:`new-from-template`. Numpy has the machinery to do this, and it is this
machinery that makes subclassing slightly non-standard.
There are two aspects to the machinery that ndarray uses to support
views and new-from-template in subclasses.
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method. The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.
A brief Python primer on ``__new__`` and ``__init__``
=====================================================
``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
For example, consider the following Python code:
.. testcode::
    class C(object):
        def __new__(cls, *args):
            print 'Cls in __new__:', cls
            print 'Args in __new__:', args
            return object.__new__(cls, *args)

        def __init__(self, *args):
            print 'type(self) in __init__:', type(self)
            print 'Args in __init__:', args
meaning that we get:
>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)
When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``. After Python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.
As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.
Why use ``__new__`` rather than just the usual ``__init__``? Because
in some cases, as for ndarray, we want to be able to return an object
of some other class. Consider the following:
.. testcode::
    class D(C):
        def __new__(cls, *args):
            print 'D cls is:', cls
            print 'D args in __new__:', args
            return C.__new__(C, *args)

        def __init__(self, *args):
            # we never get here
            print 'In D __init__'
meaning that:
>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>
The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``. Note that the ``__init__`` method of ``D`` does not get
called. In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.
This is how subclasses of the ndarray class are able to return views
that preserve the class type. When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::
obj = ndarray.__new__(subtype, shape, ...
where ``subtype`` is the subclass. Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.
That solves the problem of returning views of the same type, but now
we have a new problem. The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on. (Aside -
why not call ``obj = subtype.__new__(...`` then? Because we may not
have a ``__new__`` method with the same call signature).
The role of ``__array_finalize__``
==================================
``__array_finalize__`` is the mechanism that numpy provides to allow
subclasses to handle the various ways that new instances get created.
Remember that subclass instances can come about in these three ways:
#. explicit constructor call (``obj = MySubClass(params)``). This will
call the usual sequence of ``MySubClass.__new__`` then (if it exists)
``MySubClass.__init__``.
#. :ref:`view-casting`
#. :ref:`new-from-template`
Our ``MySubClass.__new__`` method only gets called in the case of the
explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
``MySubClass.__init__`` to deal with the view casting and
new-from-template. It turns out that ``MySubClass.__array_finalize__``
*does* get called for all three methods of object creation, so this is
where our object creation housekeeping usually goes.
* For the explicit constructor call, our subclass will need to create a
new ndarray instance of its own class. In practice this means that
we, the authors of the code, will need to make a call to
``ndarray.__new__(MySubClass,...)``, or do view casting of an existing
array (see below)
* For view casting and new-from-template, the equivalent of
``ndarray.__new__(MySubClass,...`` is called, at the C level.
The arguments that ``__array_finalize__`` receives differ for the three
methods of instance creation above.
The following code allows us to look at the call sequences and arguments:
.. testcode::
    import numpy as np

    class C(np.ndarray):
        def __new__(cls, *args, **kwargs):
            print 'In __new__ with class %s' % cls
            return np.ndarray.__new__(cls, *args, **kwargs)

        def __init__(self, *args, **kwargs):
            # in practice you probably will not need or want an __init__
            # method for your subclass
            print 'In __init__ with class %s' % self.__class__

        def __array_finalize__(self, obj):
            print 'In array_finalize:'
            print '   self type is %s' % type(self)
            print '   obj type is %s' % type(obj)
Now:
>>> # Explicit constructor
>>> c = C((10,))
In __new__ with class <class 'C'>
In array_finalize:
self type is <class 'C'>
obj type is <type 'NoneType'>
In __init__ with class <class 'C'>
>>> # View casting
>>> a = np.arange(10)
>>> cast_a = a.view(C)
In array_finalize:
self type is <class 'C'>
obj type is <type 'numpy.ndarray'>
>>> # Slicing (example of new-from-template)
>>> cv = c[:1]
In array_finalize:
self type is <class 'C'>
obj type is <class 'C'>
The signature of ``__array_finalize__`` is::
def __array_finalize__(self, obj):
``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our
own class (``self``) as well as the object from which the view has been
taken (``obj``). As you can see from the output above, the ``self`` is
always a newly created instance of our subclass, and the type of ``obj``
differs for the three instance creation methods:
* When called from the explicit constructor, ``obj`` is ``None``
* When called from view casting, ``obj`` can be an instance of any
subclass of ndarray, including our own.
* When called in new-from-template, ``obj`` is another instance of our
own subclass, that we might use to update the new ``self`` instance.
Because ``__array_finalize__`` is the only method that always sees new
instances being created, it is the sensible place to fill in instance
defaults for new object attributes, among other tasks.
This may be clearer with an example.
Simple example - adding an extra attribute to ndarray
-----------------------------------------------------
.. testcode::
    import numpy as np

    class InfoArray(np.ndarray):
        def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
                    strides=None, order=None, info=None):
            # Create the ndarray instance of our type, given the usual
            # ndarray input arguments. This will call the standard
            # ndarray constructor, but return an object of our type.
            # It also triggers a call to InfoArray.__array_finalize__
            obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset,
                                     strides, order)
            # set the new 'info' attribute to the value passed
            obj.info = info
            # Finally, we must return the newly created object:
            return obj

        def __array_finalize__(self, obj):
            # ``self`` is a new object resulting from
            # ndarray.__new__(InfoArray, ...), therefore it only has
            # attributes that the ndarray.__new__ constructor gave it -
            # i.e. those of a standard ndarray.
            #
            # We could have got to the ndarray.__new__ call in 3 ways:
            # From an explicit constructor - e.g. InfoArray():
            #    obj is None
            #    (we're in the middle of the InfoArray.__new__
            #    constructor, and self.info will be set when we return to
            #    InfoArray.__new__)
            if obj is None:
                return
            # From view casting - e.g. arr.view(InfoArray):
            #    obj is arr
            #    (type(obj) can be InfoArray)
            # From new-from-template - e.g. infoarr[:3]
            #    type(obj) is InfoArray
            #
            # Note that it is here, rather than in the __new__ method,
            # that we set the default value for 'info', because this
            # method sees all creation of default objects - with the
            # InfoArray.__new__ constructor, but also with
            # arr.view(InfoArray).
            self.info = getattr(obj, 'info', None)
            # We do not need to return anything
Using the object looks like this:
>>> obj = InfoArray(shape=(3,)) # explicit constructor
>>> type(obj)
<class 'InfoArray'>
>>> obj.info is None
True
>>> obj = InfoArray(shape=(3,), info='information')
>>> obj.info
'information'
>>> v = obj[1:] # new-from-template - here - slicing
>>> type(v)
<class 'InfoArray'>
>>> v.info
'information'
>>> arr = np.arange(10)
>>> cast_arr = arr.view(InfoArray) # view casting
>>> type(cast_arr)
<class 'InfoArray'>
>>> cast_arr.info is None
True
This class isn't very useful, because it has the same constructor as the
bare ndarray object, including passing in buffers and shapes and so on.
We would probably prefer the constructor to be able to take an already
formed ndarray from the usual numpy calls to ``np.array`` and return an
object.
Slightly more realistic example - attribute added to existing array
-------------------------------------------------------------------
Here is a class that takes a standard ndarray that already exists, casts
as our type, and adds an extra attribute.
.. testcode::
    import numpy as np

    class RealisticInfoArray(np.ndarray):
        def __new__(cls, input_array, info=None):
            # Input array is an already formed ndarray instance
            # We first cast to be our class type
            obj = np.asarray(input_array).view(cls)
            # add the new attribute to the created instance
            obj.info = info
            # Finally, we must return the newly created object:
            return obj

        def __array_finalize__(self, obj):
            # see InfoArray.__array_finalize__ for comments
            if obj is None:
                return
            self.info = getattr(obj, 'info', None)
So:
>>> arr = np.arange(5)
>>> obj = RealisticInfoArray(arr, info='information')
>>> type(obj)
<class 'RealisticInfoArray'>
>>> obj.info
'information'
>>> v = obj[1:]
>>> type(v)
<class 'RealisticInfoArray'>
>>> v.info
'information'
.. _array-wrap:
``__array_wrap__`` for ufuncs
-------------------------------------------------------
``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy
functions, to allow a subclass to set the type of the return value
and update attributes and metadata. Let's show how this works with an example.
First we make the same subclass as above, but with a different name and
some print statements:
.. testcode::
    import numpy as np

    class MySubClass(np.ndarray):
        def __new__(cls, input_array, info=None):
            obj = np.asarray(input_array).view(cls)
            obj.info = info
            return obj

        def __array_finalize__(self, obj):
            print 'In __array_finalize__:'
            print '   self is %s' % repr(self)
            print '   obj is %s' % repr(obj)
            if obj is None:
                return
            self.info = getattr(obj, 'info', None)

        def __array_wrap__(self, out_arr, context=None):
            print 'In __array_wrap__:'
            print '   self is %s' % repr(self)
            print '   arr is %s' % repr(out_arr)
            # then just call the parent
            return np.ndarray.__array_wrap__(self, out_arr, context)
We run a ufunc on an instance of our new array:
>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
self is MySubClass([0, 1, 2, 3, 4])
obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
self is MySubClass([0, 1, 2, 3, 4])
arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
self is MySubClass([1, 3, 5, 7, 9])
obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'
Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the
input with the highest ``__array_priority__`` value, in this case
``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and
``out_arr`` as the (ndarray) result of the addition. In turn, the
default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the
result to class ``MySubClass``, and called ``__array_finalize__`` -
hence the copying of the ``info`` attribute. This has all happened at the C level.
But, we could do anything we wanted:
.. testcode::
    class SillySubClass(np.ndarray):
        def __array_wrap__(self, arr, context=None):
            return 'I lost your data'
>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'
So, by defining a specific ``__array_wrap__`` method for our subclass,
we can tweak the output from ufuncs. The ``__array_wrap__`` method
requires ``self``, then an argument - which is the result of the ufunc -
and an optional parameter *context*. This parameter is returned by some
ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc,
domain of the ufunc). ``__array_wrap__`` should return an instance of
its containing class. See the masked array subclass for an
implementation.
In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on
the way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing
but pass through the array. ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.
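By analogy with ``__array_wrap__`` above, a minimal sketch of a subclass
using ``__array_prepare__`` (this class is illustrative, not from numpy):
.. testcode::
    class PreparingSubClass(np.ndarray):
        def __array_prepare__(self, out_arr, context=None):
            # called before the ufunc computes; out_arr is the freshly
            # created output array - set metadata here, then pass through
            print 'In __array_prepare__, before any computation'
            return np.ndarray.__array_prepare__(self, out_arr, context)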
Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------
One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views. Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory. Numpy keeps track of
where the data came from for a particular array or view, with the
``base`` attribute:
>>> # A normal ndarray, that owns its own data
>>> arr = np.zeros((4,))
>>> # In this case, base is None
>>> arr.base is None
True
>>> # We take a view
>>> v1 = arr[1:]
>>> # base now points to the array that it derived from
>>> v1.base is arr
True
>>> # Take a view of a view
>>> v2 = v1[1:]
>>> # base points to the view it derived from
>>> v2.base is v1
True
In general, if the array owns its own memory, as for ``arr`` in this
case, then ``arr.base`` will be None - there are some exceptions to this
- see the numpy book for more details.
The ``base`` attribute is useful in being able to tell whether we have
a view or the original array. This in turn can be useful if we need
to know whether or not to do some specific cleanup when the subclassed
array is deleted. For example, we may only want to do the cleanup if
the original array is deleted, but not the views. For an example of
how this can work, have a look at the ``memmap`` class in
``numpy.core``.
""" |
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional Wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
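A quick usage sketch for this group (the five-tap moving average below is an
arbitrary illustration, not part of this listing): ::
>>> import numpy as np
>>> from scipy import signal
>>> x = np.random.randn(100)
>>> b = np.ones(5) / 5.0 # five-tap moving-average FIR filter
>>> y = signal.lfilter(b, [1.0], x) # causal filtering
>>> y2 = signal.filtfilt(b, [1.0], x) # zero-phase forward-backward filtering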
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response from TF coefficients.
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
freqz -- Digital filter frequency response from TF coefficients.
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
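For example (a minimal sketch; the order and cutoff are arbitrary), a
Butterworth design can be combined with `filtfilt` for zero-phase filtering:
>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.butter(4, 0.2)        # 4th order, cutoff at 0.2 * Nyquist
>>> x = np.random.randn(100)            # toy input signal
>>> y = signal.filtfilt(b, a, x)
>>> y.shape
(100,)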
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
ss2tf -- state-space to transfer function.
ss2zpk -- state-space to zero-pole-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
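For instance (parameter values are illustrative only), `chirp` produces a
frequency-swept cosine:
>>> import numpy as np
>>> from scipy import signal
>>> t = np.linspace(0, 1, 500)
>>> sweep = signal.chirp(t, f0=1, t1=1, f1=10, method='linear')
>>> sweep.shape
(500,)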
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
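A window is usually fetched by name through `get_window` (the window type and
length below are arbitrary):
>>> from scipy import signal
>>> w = signal.get_window('hamming', 5)
>>> w.shape
(5,)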
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass Daubechies filter coefficients
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass coefficients
ricker -- return a Ricker (Mexican hat) wavelet
cwt -- perform continuous wavelet transform
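For example (the number of points and width are arbitrary):
>>> from scipy import signal
>>> wavelet = signal.ricker(points=100, a=4.0)
>>> wavelet.shape
(100,)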
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
stft -- Compute the Short Time Fourier Transform
istft -- Compute the Inverse Short Time Fourier Transform
check_COLA -- Check the COLA constraint for iSTFT reconstruction
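As a brief sketch (the signal and sampling rate are made up), `welch`
estimates a power spectral density:
>>> import numpy as np
>>> from scipy import signal
>>> fs = 1000.0
>>> t = np.arange(0, 1, 1 / fs)
>>> x = np.sin(2 * np.pi * 100 * t)
>>> f, Pxx = signal.welch(x, fs, nperseg=256)
>>> f.shape, Pxx.shape
((129,), (129,))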
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True, default_section='DEFAULT',
interpolation=<unset>, converters=<unset>):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
When `default_section' is given, the special section is named
accordingly. By default it is called ``"DEFAULT"`` but this can
be customized to point to any other valid section name. Its current
value can be retrieved using the ``parser_instance.default_section``
attribute and may be modified at runtime.
When `interpolation` is given, it should be an Interpolation subclass
instance. It will be used as the handler for option value
pre-processing when using getters. RawConfigParser objects don't do
any sort of interpolation, whereas ConfigParser uses an instance of
BasicInterpolation. The library also provides a ``zc.buildout``
inspired ExtendedInterpolation implementation.
When `converters` is given, it should be a dictionary where each key
represents the name of a type converter and each value is a callable
implementing the conversion from string to the desired datatype. Every
converter gets its corresponding get*() method on the parser object and
section proxies.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the iterable of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
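A minimal usage sketch (the section and option names below are invented):
>>> import configparser
>>> parser = configparser.ConfigParser()
>>> parser.read_string("[server]\nport = 8080\n")
>>> parser.getint('server', 'port')
8080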
""" |
"""
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a ``ValueError`` exception is thrown
(the exact message varies across NumPy versions), indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
When either of the dimensions compared is one, the other is
used. In other words, dimensions with size 1 are stretched or "copied"
to match the other.
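To make the image example concrete (the array contents are placeholders; only
the shapes matter here):
>>> image = np.ones((256, 256, 3))
>>> scale = np.array([0.5, 1.0, 2.0])
>>> (image * scale).shape
(256, 256, 3)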
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
""" |
"""
Wrappers to LAPACK library
==========================
flapack -- wrappers for Fortran [*] LAPACK routines
clapack -- wrappers for ATLAS LAPACK routines
calc_lwork -- calculate optimal lwork parameters
get_lapack_funcs -- query for wrapper functions.
[*] If ATLAS libraries are available, the Fortran routines
actually use ATLAS routines and should perform as well as
the ATLAS routines.
Module flapack
++++++++++++++
In the following all function names are shown without
type prefix (s,d,c,z). Optimal values for lwork can
be computed using calc_lwork module.
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,overwrite_b=0)
inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,overwrite_b=0)
inv_a,info = potri(c,lower=0,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0)
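For illustration (matrix values are arbitrary), the same routines are
reachable today through the scipy.linalg.lapack interface, e.g. the
double-precision LU pair:
>>> import numpy as np
>>> from scipy.linalg.lapack import dgetrf, dgetrs
>>> a = np.array([[4., 3.], [6., 3.]])
>>> lu, piv, info = dgetrf(a)
>>> x, info = dgetrs(lu, piv, np.array([1., 0.]))
>>> info
0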
Linear Least Squares (LLS) Problems
-----------------------------------
Drivers::
v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Computational routines::
qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0)
q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1)
Generalized Linear Least Squares (LSE and GLM) Problems
-------------------------------------------------------
Standard Eigenvalue and Singular Value Problems
-----------------------------------------------
Drivers::
w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0)
t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0)
wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0)
u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0)
Computational routines::
ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0)
ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0)
Generalized Eigenvalue and Singular Value Problems
--------------------------------------------------
Drivers::
w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
(alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,overwrite_c=0)
a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0)
Module clapack
++++++++++++++
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0)
inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0)
inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0)
Module calc_lwork
+++++++++++++++++
Optimal lwork is maxwrk. Default is minwrk.
minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1)
minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1)
minwrk,maxwrk = gelss(prefix,m,n,nrhs)
minwrk,maxwrk = getri(prefix,n)
minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1)
minwrk,maxwrk = heev(prefix,n,lower=0)
minwrk,maxwrk = syev(prefix,n,lower=0)
minwrk,maxwrk = gees(prefix,n,compute_v=1)
minwrk,maxwrk = geqrf(prefix,m,n)
minwrk,maxwrk = gqr(prefix,m,n)
""" |
"""Test module for the noddy examples
Noddy 1:
>>> import noddy
>>> n1 = noddy.Noddy()
>>> n2 = noddy.Noddy()
>>> del n1
>>> del n2
Noddy 2
>>> import noddy2
>>> n1 = noddy2.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'fulton'
>>> n1.number
42
>>> n1.name()
'jim fulton'
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n1.last = 'NAME'
>>> n1.name()
'will NAME'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME'
>>> n2 = noddy2.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy2.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 3
>>> import noddy3
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1.name()
'jim fulton'
>>> del n1.first
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: Cannot delete the first attribute
>>> n1.first = 42
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: The first attribute value must be a string
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n3 = noddy3.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 4
>>> import noddy4
>>> n1 = noddy4.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'fulton'
>>> n1.number
42
>>> n1.name()
'jim fulton'
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n1.last = 'NAME'
>>> n1.name()
'will NAME'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 NAME'
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy4.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
Test cyclic gc(?)
>>> import gc
>>> import sys
>>> gc.disable()
>>> x = []
>>> l = [x]
>>> n2.first = l
>>> n2.first
[[]]
>>> l.append(n2)
>>> del l
>>> del n1
>>> del n2
>>> sys.getrefcount(x)
3
>>> ignore = gc.collect()
>>> sys.getrefcount(x)
2
>>> gc.enable()
""" |
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
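A minimal sketch using the Python 3 module name (the host, port and echo
behaviour are invented for illustration):
    import socketserver
    class EchoHandler(socketserver.StreamRequestHandler):
        def handle(self):
            # read one line from the client and send it straight back
            data = self.rfile.readline()
            self.wfile.write(data)
    server = socketserver.ThreadingTCPServer(("localhost", 9999), EchoHandler)
    server.serve_forever()   # blocks; interrupt to stop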
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that arrive nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 NAME <EMAIL>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
""" |
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by NAME NAME and
# NAME at Harvard University, NAME at the
# University of Toronto (“Toronto”), and NAME at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property of Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# For requests to use the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: EMAIL
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# NAME, NAME and NAME Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# NAME, NAME and NAME Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# NAME, NAME, NAME and NAME Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology NAME, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
|
"""
# ggame
The simple cross-platform sprite and game platform for Brython Server (Pygame, Tkinter to follow?).
Ggame stands for a couple of things: "good game" (of course!) and also "git game" or "github game"
because it is designed to operate with [Brython Server](http://runpython.com) in concert with
Github as a backend file store.
Ggame is **not** intended to be a full-featured gaming API, with every bell and whistle. Ggame is
designed primarily as a tool for teaching computer programming, recognizing that the ability
to create engaging and interactive games is a powerful motivator for many programming students.
Accordingly, any functional or performance enhancements that *can* be reasonably implemented
by the user are left as an exercise.
## Functionality Goals
The ggame library is intended to be trivially easy to use. For example:
from ggame import App, ImageAsset, Sprite
# Create a displayed object at 100,100 using an image asset
Sprite(ImageAsset("ggame/bunny.png"), (100,100))
# Create the app, with a 500x500 pixel stage
app = App(500,500)
# Run the app
app.run()
## Overview
There are three major components to the `ggame` system: Assets, Sprites and the App.
### Assets
Asset objects (i.e. `ggame.ImageAsset`, etc.) typically represent separate files that
are provided by the "art department". These might be background images, user interface
images, or images that represent objects in the game. In addition, `ggame.SoundAsset`
is used to represent sound files (`.wav` or `.mp3` format) that can be played in the
game.
Ggame also extends the asset concept to include graphics that are generated dynamically
at run-time, such as geometrical objects, e.g. rectangles, lines, etc.
### Sprites
All of the visual aspects of the game are represented by instances of `ggame.Sprite` or
subclasses of it.
### App
Every ggame application must create a single instance of the `ggame.App` class (or
a sub-class of it). Creating an instance of the `ggame.App` class will initiate
creation of a pop-up window in your browser. Executing the app's `run` method will
begin the process of refreshing the visual assets on the screen.
### Events
No game is complete without a player and players produce events. Your code handles user
input by registering to receive keyboard and mouse events using `ggame.App.listenKeyEvent` and
`ggame.App.listenMouseEvent` methods.
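A sketch of event registration (the event type and key names follow ggame's
documented conventions and are assumptions here):
    from ggame import App
    def spacebar(event):
        # called whenever the space bar goes down
        print("boom")
    app = App()
    app.listenKeyEvent('keydown', 'space', spacebar)
    app.run()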
## Execution Environment
Ggame is designed to be executed in a web browser using [Brython](http://brython.info/),
[Pixi.js](http://www.pixijs.com/) and [Buzz](http://buzz.jaysalvat.com/). The easiest
way to do this is by executing from [runpython](http://runpython.com), with source
code residing on [github](http://github.com).
When using [runpython](http://runpython.com), you will have to configure your browser
to allow popup windows.
To use Ggame in your own application, you will minimally need to create a folder called
`ggame` in your project. Within `ggame`, copy the `ggame.py`, `sysdeps.py` and
`__init__.py` files from the [ggame project](https://github.com/BrythonServer/ggame).
### Include Ggame as a Git Subtree
From the same directory as your own python sources (note: you must have an existing git
repository with committed files in order for the following to work properly),
execute the following terminal commands:
git remote add -f ggame https://github.com/BrythonServer/ggame.git
git merge -s ours --no-commit ggame/master
mkdir ggame
git read-tree --prefix=ggame/ -u ggame/master
git commit -m "Merge ggame project as our subdirectory"
If you want to pull in updates from ggame in the future:
git pull -s subtree ggame master
You can see an example of how a ggame subtree is used by examining the
[Brython Server Spacewar](https://github.com/BrythonServer/Spacewar) repo on Github.
## Geometry
When referring to screen coordinates, note that the x-axis of the computer screen
is *horizontal* with the zero position on the left hand side of the screen. The
y-axis is *vertical* with the zero position at the **top** of the screen.
Increasing positive y-coordinates correspond to the downward direction on the
computer screen. Note that this is **different** from the way you may have learned
about x and y coordinates in math class!
""" |
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular linear system
solve_toeplitz - Solve a Toeplitz linear system
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
matrix_balance - Balance matrix entries with a similarity transformation
subspace_angles - Compute the subspace angles between two matrices
LinAlgError
LinAlgWarning
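For example (a toy 2x2 system; the printed formatting may vary across NumPy
versions):
>>> import numpy as np
>>> from scipy import linalg
>>> A = np.array([[3., 2.], [1., 4.]])
>>> linalg.solve(A, np.array([1., 2.]))
array([ 0. ,  0.5])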
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix
eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
null_space - Construct orthonormal basis for the null space of A using svd
ldl - LDL.T decomposition of a Hermitian or a symmetric matrix.
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
cdf2rdf - Complex diagonal form to real diagonal block form
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
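As a quick check (the zero matrix is chosen so the result is exactly the
identity; display formatting may vary):
>>> import numpy as np
>>> from scipy.linalg import expm
>>> expm(np.zeros((2, 2)))
array([[ 1.,  0.],
       [ 0.,  1.]])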
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
Sketches and Random Projections
===============================
.. autosummary::
:toctree: generated/
clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch)
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
""" |
"""
=====================================
Sparse matrices (:mod:`scipy.sparse`)
=====================================
.. currentmodule:: scipy.sparse
SciPy 2-D sparse matrix package for numeric data.
Contents
========
Sparse matrix classes
---------------------
.. autosummary::
:toctree: generated/
bsr_matrix - Block Sparse Row matrix
coo_matrix - A sparse matrix in COOrdinate format
csc_matrix - Compressed Sparse Column matrix
csr_matrix - Compressed Sparse Row matrix
dia_matrix - Sparse matrix with DIAgonal storage
dok_matrix - Dictionary Of Keys based sparse matrix
lil_matrix - Row-based linked list sparse matrix
Functions
---------
Building sparse matrices:
.. autosummary::
:toctree: generated/
eye - Sparse MxN matrix whose k-th diagonal is all ones
identity - Identity matrix in sparse format
kron - kronecker product of two sparse matrices
kronsum - kronecker sum of sparse matrices
diags - Return a sparse matrix from diagonals
spdiags - Return a sparse matrix from diagonals
block_diag - Build a block diagonal sparse matrix
tril - Lower triangular portion of a matrix in sparse format
triu - Upper triangular portion of a matrix in sparse format
bmat - Build a sparse matrix from sparse sub-blocks
hstack - Stack sparse matrices horizontally (column wise)
vstack - Stack sparse matrices vertically (row wise)
rand - Random values in a given shape
norm - Return norm of a sparse matrix
Sparse matrix tools:
.. autosummary::
:toctree: generated/
find
Identifying sparse matrices:
.. autosummary::
:toctree: generated/
issparse
isspmatrix
isspmatrix_csc
isspmatrix_csr
isspmatrix_bsr
isspmatrix_lil
isspmatrix_dok
isspmatrix_coo
isspmatrix_dia
Submodules
----------
.. autosummary::
:toctree: generated/
csgraph - Compressed sparse graph routines
linalg - sparse linear algebra routines
Exceptions
----------
.. autosummary::
:toctree: generated/
SparseEfficiencyWarning
SparseWarning
Usage information
=================
There are seven available sparse matrix types:
1. csc_matrix: Compressed Sparse Column format
2. csr_matrix: Compressed Sparse Row format
3. bsr_matrix: Block Sparse Row format
4. lil_matrix: List of Lists format
5. dok_matrix: Dictionary of Keys format
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
7. dia_matrix: DIAgonal format
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
The lil_matrix class supports basic slicing and fancy
indexing with a similar syntax to NumPy arrays. As illustrated below,
the COO format may also be used to efficiently construct matrices.
To perform manipulations such as multiplication or inversion, first
convert the matrix to either CSC or CSR format. The lil_matrix format is
row-based, so conversion to CSR is efficient, whereas conversion to CSC
is less so.
All conversions among the CSR, CSC, and COO formats are efficient,
linear-time operations.
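For instance (a toy matrix; only the format round trip matters here):
>>> from scipy.sparse import lil_matrix
>>> m = lil_matrix((3, 3))
>>> m[0, 1] = 2.0
>>> m.tocsr().tocoo().format
'coo'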
Matrix vector product
---------------------
To do a vector product between a sparse matrix and a vector simply use
the matrix `dot` method, as described in its docstring:
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
therefore using it will result in unexpected results or errors.
The corresponding dense array should be obtained first instead:
>>> np.dot(A.toarray(), v)
array([ 1, -3, -1], dtype=int64)
but then all the performance advantages would be lost.
The CSR format is especially suitable for fast matrix vector products.
Example 1
---------
Construct a 1000x1000 lil_matrix and add some values to it:
>>> from scipy.sparse import lil_matrix
>>> from scipy.sparse.linalg import spsolve
>>> from numpy.linalg import solve, norm
>>> from numpy.random import rand
>>> A = lil_matrix((1000, 1000))
>>> A[0, :100] = rand(100)
>>> A[1, 100:200] = A[0, :100]
>>> A.setdiag(rand(1000))
Now convert it to CSR format and solve A x = b for x:
>>> A = A.tocsr()
>>> b = rand(1000)
>>> x = spsolve(A, b)
Convert it to a dense matrix and solve, and check that the result
is the same:
>>> x_ = solve(A.toarray(), b)
Now we can compute norm of the error with:
>>> err = norm(x-x_)
>>> err < 1e-10
True
It should be small :)
Example 2
---------
Construct a matrix in COO format:
>>> from scipy import sparse
>>> from numpy import array
>>> I = array([0,3,1,0])
>>> J = array([0,3,1,2])
>>> V = array([4,5,7,9])
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
Notice that the indices do not need to be sorted.
Duplicate (i,j) entries are summed when converting to CSR or CSC.
>>> I = array([0,0,1,3,1,0,0])
>>> J = array([0,2,1,3,1,0,0])
>>> V = array([1,1,1,1,1,1,1])
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
This is useful for constructing finite-element stiffness and mass matrices.
Further Details
---------------
CSR column indices are not necessarily sorted. Likewise for CSC row
indices. Use the .sorted_indices() and .sort_indices() methods when
sorted indices are required (e.g. when passing data to other libraries).
""" |
"""==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] = x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. Numpy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrates best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
Numpy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
create an array of length 4 (same as the index array) where each index
is replaced by the value the index array has in the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are within the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
Unlike in the case of integer index arrays, in the boolean case, the
result is a 1-D array containing all the elements in the indexed array
corresponding to all the true elements in the boolean array. The
elements in the indexed array are always iterated and returned in
:term:`row-major` (C-style) order. The result is also identical to
``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
of the data, not a view as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicit reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
People expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is that
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
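When the cumulative behavior is actually wanted, newer NumPy versions provide
the unbuffered ``np.add.at`` ufunc method (an aside, not part of the original
discussion), which applies the increment once per repeated index: ::
>>> x = np.arange(0, 50, 10)
>>> np.add.at(x, np.array([1, 1, 3, 1]), 1)
>>> x
array([ 0, 13, 20, 31, 40])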
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
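For example (an illustrative use with the array y from above): ::
>>> y[np.where(y > 30)]
array([31, 32, 33, 34])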
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
""" |
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0.,  0.,  0.],
       [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
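For example, a function such as f(i, j) = i + j can be evaluated over the whole
grid at once (a small illustrative sketch): ::
>>> i, j = np.indices((3, 3))
>>> i + j
array([[0, 1, 2],
       [1, 2, 3],
       [2, 3, 4]])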
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays, so
check the last section as well).
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
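For instance, numpy's own loadtxt() can parse simple comma-separated data
(an illustrative sketch; io.StringIO here merely stands in for a real file): ::
>>> from io import StringIO
>>> np.loadtxt(StringIO("1,2,3\n4,5,6"), delimiter=',')
array([[ 1.,  2.,  3.],
       [ 4.,  5.,  6.]])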
More generic ASCII files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
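A minimal sketch of the fromfile()/tofile() round trip (the file name is
illustrative): ::
>>> a = np.arange(4, dtype=np.int32)
>>> a.tofile('data.bin')                      # raw bytes, no header or dtype info
>>> np.fromfile('data.bin', dtype=np.int32)   # dtype (and shape) must be known
array([0, 1, 2, 3], dtype=int32)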
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common use is the
many array generation functions in random, which can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
""" |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# NAME <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
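# As a hedged illustration (not part of the original tool), the SFNT header and
# table directory above can be read with struct.unpack; all OpenType data is
# big-endian, and font_path is a hypothetical variable:
#
#   import struct
#   with open(font_path, 'rb') as f:
#       version, num_tables, search_range, entry_selector, range_shift = \
#           struct.unpack('>IHHHH', f.read(12))
#       for _ in range(num_tables):
#           tag, checksum, offset, length = struct.unpack('>4sIII', f.read(16))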
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# BYTE VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
|
"""
Implementation of the trigsimp algorithm by Fu et al.
The idea behind the ``fu`` algorithm is to use a sequence of rules, applied
in what is heuristically known to be a smart order, to select a simpler
expression that is equivalent to the input.
There are transform rules in which a single rule is applied to the
expression tree. The following are just mnemonic in nature; see the
docstrings for examples.
TR0 - simplify expression
TR1 - sec-csc to cos-sin
TR2 - tan-cot to sin-cos ratio
TR2i - sin-cos ratio to tan
TR3 - angle canonicalization
TR4 - functions at special angles
TR5 - powers of sin to powers of cos
TR6 - powers of cos to powers of sin
TR7 - reduce cos power (increase angle)
TR8 - expand products of sin-cos to sums
TR9 - contract sums of sin-cos to products
TR10 - separate sin-cos arguments
TR10i - collect sin-cos arguments
TR11 - reduce double angles
TR12 - separate tan arguments
TR12i - collect tan arguments
TR13 - expand product of tan-cot
TRmorrie - prod(cos(x*2**i), (i, 0, k - 1)) -> sin(2**k*x)/(2**k*sin(x))
TR14 - factored powers of sin or cos to cos or sin power
TR15 - negative powers of sin to cot power
TR16 - negative powers of cos to tan power
TR22 - tan-cot powers to negative powers of sec-csc functions
TR111 - negative sin-cos-tan powers to csc-sec-cot
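As a small illustration of a single rule in action (mirroring the import style
of the examples below):
>>> from sympy.simplify.fu import TR2
>>> from sympy import tan
>>> from sympy.abc import x
>>> TR2(tan(x))
sin(x)/cos(x)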
There are 4 combination transforms (CTR1 - CTR4) in which a sequence of
transformations are applied and the simplest expression is selected from
a few options.
Finally, there are the 2 rule lists (RL1 and RL2), which apply a
sequence of transformations and combined transformations, and the ``fu``
algorithm itself, which applies rules and rule lists and selects the
best expressions. There is also a function ``L`` which counts the number
of trigonometric functions that appear in the expression.
Other than TR0, re-writing of expressions is not done by the transformations.
e.g. TR10i finds pairs of terms in a sum that are in the form like
``cos(x)*cos(y) + sin(x)*sin(y)``. Such expressions are targeted in a bottom-up
traversal of the expression, but no manipulation to make them appear is
attempted. For example,
Set-up for examples below:
>>> from sympy.simplify.fu import fu, L, TR9, TR10i, TR11
>>> from sympy import factor, sin, cos, powsimp
>>> from sympy.abc import x, y, z, a
>>> from time import time
>>> eq = cos(x + y)/cos(x)
>>> TR10i(eq.expand(trig=True))
-sin(x)*sin(y)/cos(x) + cos(y)
If the expression is put in "normal" form (with a common denominator) then
the transformation is successful:
>>> TR10i(_.normal())
cos(x + y)/cos(x)
TR11's behavior is similar. It rewrites double angles as smaller angles but
doesn't do any simplification of the result.
>>> TR11(sin(2)**a*cos(1)**(-a), 1)
(2*sin(1)*cos(1))**a*cos(1)**(-a)
>>> powsimp(_)
(2*sin(1))**a
The temptation is to try to make these TR rules "smarter" but that should really
be done at a higher level; the TR rules should try to maintain the "do one thing
well" principle. There is one exception, however. In TR10i and TR9 terms are
recognized even when they are each multiplied by a common factor:
>>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(y))
a*cos(x - y)
Factoring with ``factor_terms`` is used but it is "JIT"-like, being delayed
until it is deemed necessary. Furthermore, if the factoring does not
help with the simplification, it is not retained, so
``a*cos(x)*cos(y) + a*sin(x)*sin(z)`` does not become the factored
(but unsimplified in the trigonometric sense) expression:
>>> fu(a*cos(x)*cos(y) + a*sin(x)*sin(z))
a*sin(x)*sin(z) + a*cos(x)*cos(y)
In some cases factoring might be a good idea, but the user is left
to make that decision. For example:
>>> expr=((15*sin(2*x) + 19*sin(x + y) + 17*sin(x + z) + 19*cos(x - z) +
... 25)*(20*sin(2*x) + 15*sin(x + y) + sin(y + z) + 14*cos(x - z) +
... 14*cos(y - z))*(9*sin(2*y) + 12*sin(y + z) + 10*cos(x - y) + 2*cos(y -
... z) + 18)).expand(trig=True).expand()
In the expanded state, there are nearly 1000 trig functions:
>>> L(expr)
932
If the expression were factored first, this would take time, but the
resulting expression would be transformed very quickly:
>>> def clock(f, n=2):
... t=time(); f(); return round(time()-t, n)
...
>>> clock(lambda: factor(expr)) # doctest: +SKIP
0.86
>>> clock(lambda: TR10i(expr), 3) # doctest: +SKIP
0.016
If the unexpanded expression is used, the transformation takes longer but
not as long as it took to factor it and then transform it:
>>> clock(lambda: TR10i(expr), 2) # doctest: +SKIP
0.28
So neither expansion nor factoring is used in ``TR10i``: if the
expression is already factored (or partially factored) then expansion
with ``trig=True`` would destroy what is already known and take
longer; if the expression is expanded, factoring may take longer than
simply applying the transformation itself.
Although the algorithms should be canonical, always giving the same
result, they may not yield the best result. This, in general, is
the nature of simplification where searching all possible transformation
paths is very expensive. Here is a simple example. There are 6 terms
in the following sum:
>>> expr = (sin(x)**2*cos(y)*cos(z) + sin(x)*sin(y)*cos(x)*cos(z) +
... sin(x)*sin(z)*cos(x)*cos(y) + sin(y)*sin(z)*cos(x)**2 + sin(y)*sin(z) +
... cos(y)*cos(z))
>>> args = expr.args
Serendipitously, fu gives the best result:
>>> fu(expr)
3*cos(y - z)/2 - cos(2*x + y + z)/2
But if different terms were combined, a less-optimal result might be
obtained, requiring some additional work to get better simplification,
but still less than optimal. The following shows an alternative form
of ``expr`` that resists optimal simplification once a given step
is taken since it leads to a dead end:
>>> TR9(-cos(x)**2*cos(y + z) + 3*cos(y - z)/2 +
... cos(y + z)/2 + cos(-2*x + y + z)/4 - cos(2*x + y + z)/4)
sin(2*x)*sin(y + z)/2 - cos(x)**2*cos(y + z) + 3*cos(y - z)/2 + cos(y + z)/2
Here is a smaller expression that exhibits the same behavior:
>>> a = sin(x)*sin(z)*cos(x)*cos(y) + sin(x)*sin(y)*cos(x)*cos(z)
>>> TR10i(a)
sin(x)*sin(y + z)*cos(x)
>>> newa = _
>>> TR10i(expr - a) # this combines two more of the remaining terms
sin(x)**2*cos(y)*cos(z) + sin(y)*sin(z)*cos(x)**2 + cos(y - z)
>>> TR10i(_ + newa) == _ + newa # but now there is no more simplification
True
Without getting lucky or trying all possible pairings of arguments, the
final result may be less than optimal and impossible to find without
better heuristics or brute force trial of all possibilities.
Notes
=====
This work was started by NAME at the Technological School
"Electronic systems" (30.11.2011).
References
==========
http://rfdz.ph-noe.ac.at/fileadmin/Mathematik_Uploads/ACDCA/
DESTIME2006/DES_contribs/Fu/simplification.pdf
http://www.sosmath.com/trig/Trig5/trig5/pdf/pdf.html gives a formula sheet.
""" |
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
A way to represent items in a N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
... print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each of which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
Array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An `ndarray`_ with `structured data type`_ which has been subclassed as
np.recarray and whose dtype is of type np.record, making the
fields of its data type accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New Numpy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
... color = 'blue'
...
... def paint(self):
... print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = list(range(5))
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each of which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
structured data type
A data type composed of other data types.
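For example (an illustrative addition; the integer width shown is
platform dependent)::
>>> np.dtype([('x', int), ('y', float)])
dtype([('x', '<i4'), ('y', '<f8')])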
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
the high- and low-level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
""" |
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can both iterate over it and even change it without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
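A minimal sketch of the extrapolation idea (the real
``cherrypy.lib.cpstats.extrapolate_statistics`` may differ in detail)::
    def extrapolate_statistics(scope):
        # Return a copy of `scope` with every callable replaced by the
        # value it computes from the enclosing namespace or record.
        copied = {}
        for k, v in list(scope.items()):
            if isinstance(v, dict):
                v = extrapolate_statistics(v)
            elif isinstance(v, (list, tuple)):
                v = [extrapolate_statistics(record) for record in v]
            elif callable(v):
                v = v(scope)
            copied[k] = v
        return copied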
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
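For example (a hypothetical tweak for the 'My Stuff' namespace used in the
Usage section below)::
    from cherrypy.lib import cpstats
    cpstats.StatsPage.formatting['My Stuff'] = {
        'Start Time': None,         # omit this column entirely
        'Events/Second': '%.3f',    # three decimal places
    }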
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
import time
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
""" |