#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# NAME <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // numTables x 16 - searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
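#
# As an illustrative sketch (not part of the original tool), the SFNT
# header and table directory above can be read with Python's struct
# module; the OpenType font data is big-endian:
#
#   import struct
#
#   def read_table_directory(f):
#       # SFNT header: Fixed version plus four USHORT fields (12 bytes).
#       version, num_tables, search_range, entry_selector, range_shift = \
#           struct.unpack('>IHHHH', f.read(12))
#       tables = {}
#       for _ in range(num_tables):
#           # Directory entry: 4-byte tag, checkSum, offset, length.
#           tag, checksum, offset, length = struct.unpack('>4sIII', f.read(16))
#           tables[tag] = (checksum, offset, length)
#       return version, tables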
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
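#
# A hedged sketch of the checkSumAdjustment computation described above
# (the font is summed as big-endian ULONGs modulo 2**32; zero-padding of
# a trailing partial word is assumed):
#
#   import struct
#
#   def checksum_adjustment(font_bytes):
#       total = 0
#       for i in range(0, len(font_bytes), 4):
#           word = font_bytes[i:i + 4].ljust(4, b'\0')
#           total = (total + struct.unpack('>I', word)[0]) & 0xFFFFFFFF
#       return (0xB1B0AFBA - total) & 0xFFFFFFFF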
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# BYTE VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
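#
# To illustrate the byte-order split described above, the first fields of
# an EOT header could be packed little-endian while the appended font data
# stays big-endian (a sketch; the field values are placeholders):
#
#   import struct
#
#   EOT_VERSION = 0x00020001
#
#   def pack_eot_prefix(eot_size, font_data_size, flags=0):
#       # '<' selects little-endian, as the EOT header requires.
#       return struct.pack('<IIII', eot_size, font_data_size, EOT_VERSION, flags)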
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
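For instance, a minimal synchronous echo service (an illustrative
sketch, assuming the Python 3 module name socketserver) combines a
server class with a handler class:
    from socketserver import TCPServer, StreamRequestHandler
    class EchoHandler(StreamRequestHandler):
        def handle(self):
            # Echo one line back to the client.
            self.wfile.write(self.rfile.readline())
    TCPServer(('localhost', 9999), EchoHandler).serve_forever()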
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that arrive nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 NAME <EMAIL>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
""" |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - NAME - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
#
# This module works in OpenERP 5.0.0 (and probably higher).
# This module does not work in OpenERP version 4 and lower.
#
# Status 1.0 - tested on OpenERP 5.0.3
#
# Version IP_ADDRESS
# account.account.type
# Laid the basis for all account types
#
# account.account.template
# Laid the basis with all required general ledger accounts, which are linked
# via a menu structure to sections 1 through 9.
# The general ledger accounts are linked to the account.account.type.
# These links still need to be checked over carefully.
#
# account.chart.template
# Laid the basis for linking accounts to debtors, creditors, bank, purchase
# and sales journals, and the VAT configuration.
#
# Version IP_ADDRESS
# account.tax.code.template
# Laid the basis for the VAT configuration (structure).
# Used the VAT return form as the basis. Whether this works remains to be seen.
#
# account.tax.template
# Created the VAT accounts and linked them to the relevant general ledger
# accounts
#
# Version IP_ADDRESS
# Cleaned up the code and removed unused components.
# Version IP_ADDRESS
# Changed a_expense from 3000 -> 7000
# Set record id='btw_code_5b' to a negative value
# Version IP_ADDRESS
# VAT accounts were given a type designation for purchase or sale
# Version IP_ADDRESS
# Cleaned up the module.
# Version IP_ADDRESS
# Cleaned up the module.
# Version IP_ADDRESS
# Corrected a small error in l10n_nl_wizard.xml that prevented the module from installing completely.
# Version IP_ADDRESS
# Properly defined Account Receivable and Payable.
# Version IP_ADDRESS
# Properly defined all user_type_xxx fields.
# Removed construction- and garage-specific ledgers in order to create a standard module.
# This module can then be used as a basis for creating modules for specific target groups.
# Version IP_ADDRESS
# Corrected account 7010 (it duplicated 7014, which caused the installation to go wrong)
# Version IP_ADDRESS
# Corrected various account types from user_type_asset -> user_type_liability and user_type_equity
# Version IP_ADDRESS
# Small correction to "VAT receivable high": the id was the same for both, so "high" was overwritten by
# "other". Clarified the descriptions in the tax codes for the VAT return overview.
# Version IP_ADDRESS
# Adjusted the VAT descriptions so that reports look better. Removed 2a, 5b and the like, and added some descriptions.
# Version IP_ADDRESS - Switch to English
# Added properties_stock_xxx accounts for correct stock valuation, changed 7000-accounts from type cash to type expense
# Changed naming of 7020 and 7030 to Kostprijs omzet xxxx
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
...     def new_logging_func(*args, **kwargs):
...         print "Logging call with parameters:", args, kwargs
...         return f(*args, **kwargs)
...
...     return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
...     return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
...     wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print "Key %d: %s" % (n, k)
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each of which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
An array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New Numpy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
...     color = 'blue'
...
...     def paint(self):
...         print "Painting the city %s!" % self.color
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each of which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
tuple
A sequence that may contain a variable number of elements of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
...     return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
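For example (an illustrative doctest; ``np.add`` is the ufunc form of
the ``+`` operator)::
>>> np.add([1, 2], [3, 4])
array([4, 6])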
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
""" |
{'downAngleV': (0.0, 42.0, 10.0, 10.0), 'useOldDownAngle': False, 'splitBias': 0.0,
 'branchDist': 1.600000023841858, 'downAngle': (90.0, 110.0, 45.0, 45.0), 'leafScale': 0.20000000298023224,
 'baseSize': 0.06799984723329544, 'shapeS': '4', 'leafScaleX': 0.019999999552965164, 'nrings': 7,
 'gustF': 0.07500000298023224, 'showLeaves': True, 'taperCrown': 0.0, 'curveBack': (0.0, 0.0, 0.0, 0.0),
 'leafShape': 'rect', 'makeMesh': False, 'scale0': 1.0, 'length': (1.0, 0.3199999928474426, 0.75, 0.44999998807907104),
 'ratioPower': 1.0, 'taper': (1.0, 1.0, 1.0, 1.0), 'baseSize_s': 0.25, 'splitHeight': 0.20000000298023224,
 'handleType': '0', 'pruneBase': 0.30000001192092896, 'attractUp': (2.0, 0.0, 0.5, 0.5), 'boneStep': (1, 1, 1, 1),
 'seed': 0, 'leafDownAngle': 65.0, 'attractOut': (0.0, 0.0, 0.0, 0.0), 'leafAnim': False, 'gust': 1.0,
 'curveV': (100.0, 100.0, 100.0, 0.0), 'splitAngle': (0.0, 22.0, 25.0, 0.0), 'prunePowerLow': 0.0010000000474974513,
 'leafangle': -10.0, 'frameRate': 1.0, 'pruneRatio': 1.0, 'rMode': 'rotate', 'shape': '8',
 'segSplits': (0.0, 0.30000001192092896, 0.4000000059604645, 0.0), 'branches': (0, 36, 7, 10),
 'prunePowerHigh': 0.5, 'af1': 1.0, 'closeTip': False, 'splitAngleV': (0.0, 5.0, 0.0, 0.0),
 'ratio': 0.019999999552965164, 'minRadius': 0.001500000013038516, 'levels': 3, 'leafRotate': 137.5,
 'armLevels': 2, 'horzLeaves': False, 'pruneWidth': 0.4000000059604645, 'baseSplits': 0, 'scale': 4.0,
 'leafScaleV': 0.10000000149011612, 'splitByLen': True, 'useParentAngle': True, 'previewArm': False,
 'wind': 1.0, 'leafDist': '3', 'leafScaleT': 0.25, 'bevel': True, 'resU': 4, 'leafDownAngleV': 55.0,
 'pruneWidthPeak': 0.6000000238418579, 'af3': 4.0, 'scaleV': 1.0, 'rootFlare': 1.0, 'loopFrames': 0,
 'curve': (0.0, -40.0, -30.0, 0.0), 'leaves': 500, 'scaleV0': 0.10000000149011612,
 'rotate': (99.5, 137.5, -90.0, 137.5), 'curveRes': (8, 5, 3, 1), 'useArm': False, 'af2': 1.0, 'bevelRes': 2,
 'autoTaper': True, 'lengthV': (0.0, 0.15000000596046448, 0.25, 0.0), 'armAnim': False, 'prune': False,
 'radiusTweak': (1.0, 1.0, 1.0, 1.0), 'leafRotateV': 30.0,
 'customShape': (0.8999999761581421, 1.0, 0.20000000298023224, 0.20000000298023224), 'rotateV': (15.0, 0.0, 0.0, 0.0)}
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
# 2012-06-29 EMAIL Made all classes new-style
# 2012-07-02 EMAIL Include dist. ElementPath
# 2013-02-27 EMAIL renamed module files, kept namespace.
#
# Copyright (c) 1999-2005 by NAME. All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by NAME.
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
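#
# A minimal usage sketch (an illustration assuming the standard
# ElementTree API; the XML snippet is a placeholder):
#
#   from xml.etree.ElementTree import fromstring, tostring
#
#   root = fromstring('<root><child name="a"/></root>')
#   print(root.find('child').get('name'))  # -> a
#   print(tostring(root))                  # serialized form of the tree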
# -*- encoding: utf-8 -*-
# back ported from CPython 3
# A. HISTORY OF THE SOFTWARE
# ==========================
#
# Python was created in the early 1990s by NAME at Stichting
# Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
# as a successor of a language called ABC. NAME remains Python's
# principal author, although it includes many contributions from others.
#
# In 1995, NAME continued his work on Python at the Corporation for
# National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
# in Reston, Virginia where he released several versions of the
# software.
#
# In May 2000, NAME and the Python core development team moved to
# BeOpen.com to form the BeOpen PythonLabs team. In October of the same
# year, the PythonLabs team moved to Digital Creations (now Zope
# Corporation, see http://www.zope.com). In 2001, the Python Software
# Foundation (PSF, see http://www.python.org/psf/) was formed, a
# non-profit organization created specifically to own Python-related
# Intellectual Property. Zope Corporation is a sponsoring member of
# the PSF.
#
# All Python releases are Open Source (see http://www.opensource.org for
# the Open Source Definition). Historically, most, but not all, Python
# releases have also been GPL-compatible; the table below summarizes
# the various releases.
#
# Release         Derived     Year        Owner       GPL-
#                 from                                compatible? (1)
#
# 0.9.0 thru 1.2              1991-1995   CWI         yes
# 1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
# 1.6             1.5.2       2000        CNRI        no
# 2.0             1.6         2000        BeOpen.com  no
# 1.6.1           1.6         2001        CNRI        yes (2)
# 2.1             2.0+1.6.1   2001        PSF         no
# 2.0.1           2.0+1.6.1   2001        PSF         yes
# 2.1.1           2.1+2.0.1   2001        PSF         yes
# 2.2             2.1.1       2001        PSF         yes
# 2.1.2           2.1.1       2002        PSF         yes
# 2.1.3           2.1.2       2002        PSF         yes
# 2.2.1           2.2         2002        PSF         yes
# 2.2.2           2.2.1       2002        PSF         yes
# 2.2.3           2.2.2       2003        PSF         yes
# 2.3             2.2.2       2002-2003   PSF         yes
# 2.3.1           2.3         2002-2003   PSF         yes
# 2.3.2           2.3.1       2002-2003   PSF         yes
# 2.3.3           2.3.2       2002-2003   PSF         yes
# 2.3.4           2.3.3       2004        PSF         yes
# 2.3.5           2.3.4       2005        PSF         yes
# 2.4             2.3         2004        PSF         yes
# 2.4.1           2.4         2005        PSF         yes
# 2.4.2           2.4.1       2005        PSF         yes
# 2.4.3           2.4.2       2006        PSF         yes
# 2.4.4           2.4.3       2006        PSF         yes
# 2.5             2.4         2006        PSF         yes
# 2.5.1           2.5         2007        PSF         yes
# 2.5.2           2.5.1       2008        PSF         yes
# 2.5.3           2.5.2       2008        PSF         yes
# 2.6             2.5         2008        PSF         yes
# 2.6.1           2.6         2008        PSF         yes
# 2.6.2           2.6.1       2009        PSF         yes
# 2.6.3           2.6.2       2009        PSF         yes
# 2.6.4           2.6.3       2009        PSF         yes
# 2.6.5           2.6.4       2010        PSF         yes
# 2.7             2.6         2010        PSF         yes
#
# Footnotes:
#
# (1) GPL-compatible doesn't mean that we're distributing Python under
# the GPL. All Python licenses, unlike the GPL, let you distribute
# a modified version without making your changes open source. The
# GPL-compatible licenses make it possible to combine Python with
# other software that is released under the GPL; the others don't.
#
# (2) According to NAME 1.6.1 is not GPL-compatible,
# because its license has a choice of law clause. According to
# CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
# is "not incompatible" with the GPL.
#
# Thanks to the many outside volunteers who have worked under NAME's
# direction to make these releases possible.
#
#
# B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
# ===============================================================
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained
# in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
# -------------------------------------------
#
# BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
#
# 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
# office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
# Individual or Organization ("Licensee") accessing and otherwise using
# this software in source or binary form and its associated
# documentation ("the Software").
#
# 2. Subject to the terms and conditions of this BeOpen Python License
# Agreement, BeOpen hereby grants Licensee a non-exclusive,
# royalty-free, world-wide license to reproduce, analyze, test, perform
# and/or display publicly, prepare derivative works, distribute, and
# otherwise use the Software alone or in any derivative version,
# provided, however, that the BeOpen Python License is retained in the
# Software, alone or in any derivative version prepared by Licensee.
#
# 3. BeOpen is making the Software available to Licensee on an "AS IS"
# basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
# SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
# AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
# DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 5. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 6. This License Agreement shall be governed by and interpreted in all
# respects by the law of the State of California, excluding conflict of
# law provisions. Nothing in this License Agreement shall be deemed to
# create any relationship of agency, partnership, or joint venture
# between BeOpen and Licensee. This License Agreement does not grant
# permission to use BeOpen trademarks or trade names in a trademark
# sense to endorse or promote products or services of Licensee, or any
# third party. As an exception, the "BeOpen Python" logos available at
# http://www.pythonlabs.com/logos.html may be used according to the
# permissions granted on that web page.
#
# 7. By copying, installing or otherwise using the software, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
# ---------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Corporation for National
# Research Initiatives, having an office at 1895 Preston White Drive,
# Reston, VA 20191 ("CNRI"), and the Individual or Organization
# ("Licensee") accessing and otherwise using Python 1.6.1 software in
# source or binary form and its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, CNRI
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python 1.6.1
# alone or in any derivative version, provided, however, that CNRI's
# License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
# 1995-2001 Corporation for National Research Initiatives; All Rights
# Reserved" are retained in Python 1.6.1 alone or in any derivative
# version prepared by Licensee. Alternately, in lieu of CNRI's License
# Agreement, Licensee may substitute the following text (omitting the
# quotes): "Python 1.6.1 is made available subject to the terms and
# conditions in CNRI's License Agreement. This Agreement together with
# Python 1.6.1 may be located on the Internet using the following
# unique, persistent identifier (known as a handle): 1895.22/1013. This
# Agreement may also be obtained from a proxy server on the Internet
# using the following URL: http://hdl.handle.net/1895.22/1013".
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python 1.6.1 or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python 1.6.1.
#
# 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
# basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. This License Agreement shall be governed by the federal
# intellectual property law of the United States, including without
# limitation the federal copyright law, and, to the extent such
# U.S. federal law does not apply, by the law of the Commonwealth of
# Virginia, excluding Virginia's conflict of law provisions.
# Notwithstanding the foregoing, with regard to derivative works based
# on Python 1.6.1 that incorporate non-separable material that was
# previously distributed under the GNU General Public License (GPL), the
# law of the Commonwealth of Virginia shall govern this License
# Agreement only as to issues arising under or with respect to
# Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
# License Agreement shall be deemed to create any relationship of
# agency, partnership, or joint venture between CNRI and Licensee. This
# License Agreement does not grant permission to use CNRI trademarks or
# trade name in a trademark sense to endorse or promote products or
# services of Licensee, or any third party.
#
# 8. By clicking on the "ACCEPT" button where indicated, or by copying,
# installing or otherwise using Python 1.6.1, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
#
# ACCEPT
#
#
# CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
# --------------------------------------------------
#
# Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
# The Netherlands. All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Stichting Mathematisch
# Centrum or CWI not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#-- GAUDI jobOptions generated on Mon Jul 27 18:36:16 2015
#-- Contains event types :
#-- 15104010 - 67 files - 1002713 events - 226.51 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : NAME
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/NAME/Default.py;$APPCONFIGOPTS/NAME/DataType-2012.py;$APPCONFIGOPTS/NAME/NAME-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-126084
#-- StepId : 126084
#-- StepName : Sim08e - 2012 - MD - Pythia8
#-- ApplicationName : NAME
#-- ApplicationVersion : v45r7
#-- OptionFiles : $APPCONFIGOPTS/NAME/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/NAME/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : dddb-20130929-1
#-- CONDDB : sim-20130522-1-vc-md100
#-- ExtraPackages : AppConfig.v3r182;DecFiles.v27r17
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : NAME
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/NAME/NAMESimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/NAME/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : USERNAME
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/USERNAME/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/USERNAME/DataType-2012.py;$APPCONFIGOPTS/USERNAME/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
"""
# ggame
The simple cross-platform sprite and game platform for Brython Server (Pygame, Tkinter to follow?).
Ggame stands for a couple of things: "good game" (of course!) and also "git game" or "github game"
because it is designed to operate with [Brython Server](http://runpython.com) in concert with
Github as a backend file store.
Ggame is **not** intended to be a full-featured gaming API, with every bell and whistle. Ggame is
designed primarily as a tool for teaching computer programming, recognizing that the ability
to create engaging and interactive games is a powerful motivator for many programming students.
Accordingly, any functional or performance enhancements that *can* be reasonably implemented
by the user are left as an exercise.
## Functionality Goals
The ggame library is intended to be trivially easy to use. For example:
from ggame import App, ImageAsset, Sprite
# Create a displayed object at 100,100 using an image asset
Sprite(ImageAsset("ggame/bunny.png"), (100,100))
# Create the app, with a 500x500 pixel stage
app = App(500,500)
# Run the app
app.run()
## Overview
There are three major components to the `ggame` system: Assets, Sprites and the App.
### Assets
Asset objects (i.e. `ggame.ImageAsset`, etc.) typically represent separate files that
are provided by the "art department". These might be background images, user interface
images, or images that represent objects in the game. In addition, `ggame.SoundAsset`
is used to represent sound files (`.wav` or `.mp3` format) that can be played in the
game.
Ggame also extends the asset concept to include graphics that are generated dynamically
at run-time, such as geometrical objects (rectangles, lines, etc.).
### Sprites
All of the visual aspects of the game are represented by instances of `ggame.Sprite` or
subclasses of it.
### App
Every ggame application must create a single instance of the `ggame.App` class (or
a sub-class of it). Creating an instance of the `ggame.App` class will initiate
creation of a pop-up window on your browser. Executing the app's `run` method will
begin the process of refreshing the visual assets on the screen.
### Events
No game is complete without a player and players produce events. Your code handles user
input by registering to receive keyboard and mouse events using `ggame.App.listenKeyEvent` and
`ggame.App.listenMouseEvent` methods.
## Execution Environment
Ggame is designed to be executed in a web browser using [Brython](http://brython.info/),
[Pixi.js](http://www.pixijs.com/) and [Buzz](http://buzz.jaysalvat.com/). The easiest
way to do this is by executing from [runpython](http://runpython.com), with source
code residing on [github](http://github.com).
When using [runpython](http://runpython.com), you will have to configure your browser
to allow popup windows.
To use Ggame in your own application, you will minimally need to create a folder called
`ggame` in your project. Within `ggame`, copy the `ggame.py`, `sysdeps.py` and
`__init__.py` files from the [ggame project](https://github.com/BrythonServer/ggame).
### Include Ggame as a Git Subtree
From the same directory as your own python sources (note: you must have an existing git
repository with committed files in order for the following to work properly),
execute the following terminal commands:
git remote add -f ggame https://github.com/BrythonServer/ggame.git
git merge -s ours --no-commit ggame/master
mkdir ggame
git read-tree --prefix=ggame/ -u ggame/master
git commit -m "Merge ggame project as our subdirectory"
If you want to pull in updates from ggame in the future:
git pull -s subtree ggame master
You can see an example of how a ggame subtree is used by examining the
[Brython Server Spacewar](https://github.com/BrythonServer/Spacewar) repo on Github.
## Geometry
When referring to screen coordinates, note that the x-axis of the computer screen
is *horizontal* with the zero position on the left hand side of the screen. The
y-axis is *vertical* with the zero position at the **top** of the screen.
Increasing positive y-coordinates correspond to the downward direction on the
computer screen. Note that this is **different** from the way you may have learned
about x and y coordinates in math class!
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
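Example (an illustrative sketch, assuming the Python 3 module name
configparser):
    import configparser
    cp = configparser.ConfigParser()
    cp.read_string("[server]\nport = 8080\n")
    cp.getint("server", "port")  # -> 8080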
""" |
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
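Pulling a few of these together, a minimal self-contained test might look
like this (a sketch; the script under test is invented for illustration):

    import TestCmd

    test = TestCmd.TestCmd(workdir = '')
    test.write('hello.py', 'print("hello")\n')
    test.run(program = test.workpath('hello.py'), interpreter = 'python')
    if test.stdout() != 'hello\n':
        test.fail_test()
    test.pass_test()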
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately, reporting
PASSED, FAILED, or NO RESULT, and exit with status 0 (success), 1, or 2,
respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
""" |
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is parameter and global
The tests all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions -- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name. As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> None = 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> b"" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
The actual error case counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
>>> f() += 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
A continue in a for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print(abc)
>>> test()
9
Start simple: a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print(1)
... break
... print(2)
... finally:
... print(3)
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
Misuse of the nonlocal statement can lead to a few unique syntax errors.
>>> def f(x):
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is parameter and nonlocal
>>> def f():
... global x
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is nonlocal and global
>>> def f():
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: no binding for nonlocal 'x' found
From SF bug #1705365
>>> nonlocal x
Traceback (most recent call last):
...
SyntaxError: nonlocal declaration not allowed at module level
TODO(jhylton): Figure out how to test SyntaxWarning with doctest.
## >>> def f(x):
## ... def f():
## ... print(x)
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
## >>> def f():
## ... x = 1
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
Make sure that the old "raise X, Y[, Z]" form is gone:
>>> raise X, Y
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> raise X, Y, Z
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
SyntaxError: can't assign to literal
Corner-cases that used to fail to raise the correct error:
>>> def f(*, x=lambda __debug__:0): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*args:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(**kwargs:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
>>> def f(**__debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*xx, __debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
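For illustration, a minimal round trip through read_dict() and the typed
getters (the section, option names and values are hypothetical):

    from configparser import ConfigParser

    parser = ConfigParser()
    parser.read_dict({'server': {'host': 'localhost', 'port': '8080'}})
    parser.sections()                # -> ['server']
    parser.get('server', 'host')     # -> 'localhost'
    parser.getint('server', 'port')  # -> 8080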
""" |
"""Configuration file parser.
A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
""" |
"""
AUI is an Advanced User Interface library that aims to implement "cutting-edge"
interface usability and design features so developers can quickly and easily create
beautiful and usable application interfaces.
Vision and Design Principles
============================
AUI attempts to encapsulate the following aspects of the user interface:
* **Frame Management**: Frame management provides the means to open, move and hide common
controls that are needed to interact with the document, and allow these configurations
to be saved into different perspectives and loaded at a later time.
* **Toolbars**: Toolbars are a specialized subset of the frame management system and should
behave similarly to other docked components. However, they also require additional
functionality, such as "spring-loaded" rebar support, "chevron" buttons and end-user
customizability.
* **Modeless Controls**: Modeless controls expose a tool palette or set of options that
float above the application content while allowing it to be accessed. Usually accessed
by the toolbar, these controls disappear when an option is selected, but may also be
"torn off" the toolbar into a floating frame of their own.
* **Look and Feel**: Look and feel encompasses the way controls are drawn, both when shown
statically as well as when they are being moved. This aspect of user interface design
incorporates "special effects" such as transparent window dragging as well as frame animation.
AUI adheres to the following principles:
- Use native floating frames to obtain a native look and feel for all platforms;
- Use existing wxPython code where possible, such as sizer implementation for frame management;
- Use standard wxPython coding conventions.
Usage
=====
The following example shows a simple implementation that uses L{AuiManager} to manage
three text controls in a frame window::

    import wx
    import wx.lib.agw.aui as aui

    class MyFrame(wx.Frame):

        def __init__(self, parent, id=-1, title="AUI Test", pos=wx.DefaultPosition,
                     size=(800, 600), style=wx.DEFAULT_FRAME_STYLE):

            wx.Frame.__init__(self, parent, id, title, pos, size, style)

            self._mgr = aui.AuiManager()

            # notify AUI which frame to use
            self._mgr.SetManagedWindow(self)

            # create several text controls
            text1 = wx.TextCtrl(self, -1, "Pane 1 - sample text",
                                wx.DefaultPosition, wx.Size(200, 150),
                                wx.NO_BORDER | wx.TE_MULTILINE)

            text2 = wx.TextCtrl(self, -1, "Pane 2 - sample text",
                                wx.DefaultPosition, wx.Size(200, 150),
                                wx.NO_BORDER | wx.TE_MULTILINE)

            text3 = wx.TextCtrl(self, -1, "Main content window",
                                wx.DefaultPosition, wx.Size(200, 150),
                                wx.NO_BORDER | wx.TE_MULTILINE)

            # add the panes to the manager
            self._mgr.AddPane(text1, aui.AuiPaneInfo().Left().Caption("Pane Number One"))
            self._mgr.AddPane(text2, aui.AuiPaneInfo().Bottom().Caption("Pane Number Two"))
            self._mgr.AddPane(text3, aui.AuiPaneInfo().CenterPane())

            # tell the manager to "commit" all the changes just made
            self._mgr.Update()

            self.Bind(wx.EVT_CLOSE, self.OnClose)

        def OnClose(self, event):
            # deinitialize the frame manager
            self._mgr.UnInit()
            self.Destroy()
            event.Skip()

    # our normal wxApp-derived class, as usual
    app = wx.PySimpleApp()
    frame = MyFrame(None)
    app.SetTopWindow(frame)
    frame.Show()
    app.MainLoop()
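The perspective saving and loading mentioned under Frame Management can be
exercised with two calls; a minimal sketch, assuming the `self._mgr` manager
from the example above::

    # capture the current layout as a string (e.g. to persist it on disk)
    perspective = self._mgr.SavePerspective()

    # ... later, restore the saved layout and commit the change
    self._mgr.LoadPerspective(perspective)
    self._mgr.Update()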
What's New
==========
Current wxAUI Version Tracked: wxWidgets 2.9.0 (SVN HEAD)
The wxPython AUI version fixes the following bugs or implements the following
missing features (the list is not exhaustive):
- Visual Studio 2005 style docking: http://www.kirix.com/forums/viewtopic.php?f=16&t=596
- Dock and Pane Resizing: http://www.kirix.com/forums/viewtopic.php?f=16&t=582
- Patch concerning dock resizing: http://www.kirix.com/forums/viewtopic.php?f=16&t=610
- Patch to effect wxAuiToolBar orientation switch: http://www.kirix.com/forums/viewtopic.php?f=16&t=641
- AUI: Core dump when loading a perspective in wxGTK (MSW OK): http://www.kirix.com/forums/viewtopic.php?f=15&t=627
- wxAuiNotebook reordered AdvanceSelection(): http://www.kirix.com/forums/viewtopic.php?f=16&t=617
- Vertical Toolbar Docking Issue: http://www.kirix.com/forums/viewtopic.php?f=16&t=181
- Patch to show the resize hint on mouse-down in aui: http://trac.wxwidgets.org/ticket/9612
- The Left/Right and Top/Bottom Docks over draw each other: http://trac.wxwidgets.org/ticket/3516
- MinSize() not honoured: http://trac.wxwidgets.org/ticket/3562
- Layout problem with wxAUI: http://trac.wxwidgets.org/ticket/3597
- Resizing children ignores current window size: http://trac.wxwidgets.org/ticket/3908
- Resizing panes under Vista does not repaint background: http://trac.wxwidgets.org/ticket/4325
- Resize sash resizes in response to click: http://trac.wxwidgets.org/ticket/4547
- "Illegal" resizing of the AuiPane? (wxPython): http://trac.wxwidgets.org/ticket/4599
- Floating wxAUIPane Resize Event doesn't update its position: http://trac.wxwidgets.org/ticket/9773
- Don't hide floating panels when we maximize some other panel: http://trac.wxwidgets.org/ticket/4066
- wxAUINotebook incorrect ALLOW_ACTIVE_PANE handling: http://trac.wxwidgets.org/ticket/4361
- Page changing veto doesn't work, (patch supplied): http://trac.wxwidgets.org/ticket/4518
- Show and DoShow are mixed around in wxAuiMDIChildFrame: http://trac.wxwidgets.org/ticket/4567
- wxAuiManager & wxToolBar - ToolBar Of Size Zero: http://trac.wxwidgets.org/ticket/9724
- wxAuiNotebook doesn't behave properly like a container as far as...: http://trac.wxwidgets.org/ticket/9911
- Serious layout bugs in wxAUI: http://trac.wxwidgets.org/ticket/10620
- wxAuiDefaultTabArt::Clone() should just use copy constructor: http://trac.wxwidgets.org/ticket/11388
- Drop down button for check tool on wxAuiToolbar: http://trac.wxwidgets.org/ticket/11139
Plus the following features:
- AuiManager:
(a) Implementation of a simple minimize pane system: clicking on a pane's minimize button causes a new
AuiToolBar to be created and added to the frame manager (currently the implementation is such
that panes at West will have a toolbar at the right, panes at South will have toolbars at the
bottom, etc...) and the pane is hidden in the manager.
Clicking on the restore button on the newly created toolbar will result in the toolbar being
removed and the original pane being restored;
(b) Panes can be docked on top of each other to form `AuiNotebooks`; `AuiNotebooks` tabs can be torn
off to create floating panes;
(c) On Windows XP, use the nice sash drawing provided by XP while dragging the sash;
(d) Possibility to set an icon on docked panes;
(e) Possibility to draw a sash visual grip, for enhanced visualization of sashes;
(f) Implementation of a native docking art (`ModernDockArt`). Windows XP only, **requires** NAME
pywin32 package (winxptheme);
(g) Possibility to set a transparency for floating panes (a la Paint .NET);
(h) Snapping the main frame to the screen in any position specified by horizontal and vertical
alignments;
(i) Snapping floating panes on left/right/top/bottom or any combination of directions, a la Winamp;
(j) "Fly-out" floating panes, i.e. panes which show themselves only when the mouse hover them;
(k) Ability to set custom bitmaps for pane buttons (close, maximize, etc...);
(l) Implementation of the style ``AUI_MGR_ANIMATE_FRAMES``, which fades out floating panes when
they are closed (all platforms which support frames transparency) and show a moving rectangle
when they are docked and minimized (Windows < Vista and GTK only);
(m) A pane switcher dialog is available to cycle through existing AUI panes;
(n) Some flags which allow choosing the orientation and the position of the minimized panes;
(o) The functions [Get]MinimizeMode() in `AuiPaneInfo` which allow setting/getting the flags described above;
(p) Events like ``EVT_AUI_PANE_DOCKING``, ``EVT_AUI_PANE_DOCKED``, ``EVT_AUI_PANE_FLOATING`` and ``EVT_AUI_PANE_FLOATED`` are
available for all panes *except* toolbar panes;
(q) Implementation of the RequestUserAttention method for panes;
(r) Ability to show the caption bar of docked panes on the left instead of on the top (with caption
text rotated by 90 degrees then). This is similar to what `wxDockIt` did. To enable this feature on any
given pane, simply call `CaptionVisible(True, left=True)`;
(s) New Aero-style docking guides: you can enable them by using the `AuiManager` style ``AUI_MGR_AERO_DOCKING_GUIDES``;
(t) A slide-in/slide-out preview of minimized panes can be seen by enabling the `AuiManager` style
``AUI_MGR_PREVIEW_MINIMIZED_PANES`` and by hovering with the mouse on the minimized pane toolbar tool;
(u) New Whidbey-style docking guides: you can enable them by using the `AuiManager` style ``AUI_MGR_WHIDBEY_DOCKING_GUIDES``;
(v) Native or custom-drawn mini frames can be used as floating panes, depending on the ``AUI_MGR_USE_NATIVE_MINIFRAMES`` style;
(w) A "smooth docking effect" can be obtained by using the ``AUI_MGR_SMOOTH_DOCKING`` style (similar to PyQT docking style).
|
- AuiNotebook:
(a) Implementation of the style ``AUI_NB_HIDE_ON_SINGLE_TAB``, a la `wx.lib.agw.flatnotebook`;
(b) Implementation of the style ``AUI_NB_SMART_TABS``, a la `wx.lib.agw.flatnotebook`;
(c) Implementation of the style ``AUI_NB_USE_IMAGES_DROPDOWN``, which allows showing tab images
on the tab dropdown menu instead of bare check menu items (a la `wx.lib.agw.flatnotebook`);
(d) 6 different tab arts are available, namely:
(1) Default "glossy" theme (as in `wx.aui.AuiNotebook`)
(2) Simple theme (as in `wx.aui.AuiNotebook`)
(3) Firefox 2 theme
(4) Visual Studio 2003 theme (VC71)
(5) Visual Studio 2005 theme (VC81)
(6) Google Chrome theme
(e) Enabling/disabling tabs;
(f) Setting the colour of the tab's text;
(g) Implementation of the style ``AUI_NB_CLOSE_ON_TAB_LEFT``, which draws the tab close button on
the left instead of on the right (a la Camino browser);
(h) Ability to save and load perspectives in `AuiNotebook` (experimental);
(i) Possibility to add custom buttons in the `AuiNotebook` tab area;
(j) Implementation of the style ``AUI_NB_TAB_FLOAT``, which allows the floating of single tabs.
Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far
enough outside of the notebook to become floating pages;
(k) Implementation of the style ``AUI_NB_DRAW_DND_TAB`` (on by default), which draws an image
representation of a tab while dragging;
(l) Implementation of the `AuiNotebook` unsplit functionality, which unsplits a split `AuiNotebook`
when double-clicking on a sash;
(m) Possibility to hide all the tabs by calling `HideAllTabs`;
(n) wxPython controls can now be added inside page tabs by calling `AddControlToPage`, and they can be
removed by calling `RemoveControlFromPage`;
(o) Possibility to preview all the pages in a `AuiNotebook` (as thumbnails) by using the `NotebookPreview`
method of `AuiNotebook`;
(p) Tab labels can be edited by calling the `SetRenamable` method on a `AuiNotebook` page;
(q) Support for multi-line tab labels in `AuiNotebook`;
(r) Support for setting minimum and maximum tab widths for fixed width tabs.
|
- AuiToolBar:
(a) ``AUI_TB_PLAIN_BACKGROUND`` style that makes it easy to set up a plain background for the AUI toolbar,
without the need to override drawing methods. This style contrasts with the default behaviour
of `wx.aui.AuiToolBar`, which draws a background gradient; that breaks the window design when
the toolbar is put inside a control that has a margin between the borders and the toolbar (example: put
`wx.aui.AuiToolBar` within a `wx.StaticBoxSizer` that has a plain background);
(b) `AuiToolBar` allow item alignment: http://trac.wxwidgets.org/ticket/10174;
(c) `AUIToolBar` `DrawButton()` improvement: http://trac.wxwidgets.org/ticket/10303;
(d) `AuiToolBar` automatically assign new id for tools: http://trac.wxwidgets.org/ticket/10173;
(e) `AuiToolBar` Allow right-click on any kind of button: http://trac.wxwidgets.org/ticket/10079;
(f) `AuiToolBar` idle update only when visible: http://trac.wxwidgets.org/ticket/10075;
(g) Ability to create `AuiToolBar` tools with [counter]clockwise rotation. This allows a
variant of the minimizing functionality with a rotated button which keeps the caption of the pane
as label;
(h) Allow setting the alignment of all tools in a toolbar that is expanded.
TODOs
=====
- Documentation, documentation and documentation;
- Fix `tabmdi.AuiMDIParentFrame` and friends, they do not work correctly at present;
- Allow specification of `CaptionLeft()` to `AuiPaneInfo` to show the caption bar of docked panes
on the left instead of on the top (with caption text rotated by 90 degrees then). This is
similar to what `wxDockIt` did - DONE;
- Make developer-created `AuiNotebooks` and automatic (framemanager-created) `AuiNotebooks` behave
the same way (undocking of tabs) - DONE, to some extent;
- Find a way to dock panes in already floating panes (`AuiFloatingFrames`), as they already have
their own `AuiManager`;
- Add more gripper styles (see, i.e., PlusDock 4.0);
- Add an "AutoHide" feature to docked panes, similar to fly-out floating panes (see, i.e., PlusDock 4.0);
- Add events for panes when they are about to float or to be docked (something like
``EVT_AUI_PANE_FLOATING/ED`` and ``EVT_AUI_PANE_DOCKING/ED``) - DONE, to some extent;
- Implement the 4-way splitter behaviour for horizontal and vertical sashes if they intersect;
- Extend `tabart.py` with more aui tab arts;
- Implement ``AUI_NB_LEFT`` and ``AUI_NB_RIGHT`` tab locations in `AuiNotebook`;
- Move `AuiDefaultToolBarArt` into a separate module (as with `tabart.py` and `dockart.py`) and
provide more arts for toolbars (maybe from `wx.lib.agw.flatmenu`?)
- Support multiple-rows/multiple columns toolbars;
- Integrate as much as possible with `wx.lib.agw.flatmenu`, from dropdown menus in `AuiNotebook` to
toolbars and menu positioning;
- Possibly handle minimization of panes in a different way (or provide an option to switch to
another way of minimizing panes);
- Clean up/speed up the code, especially time-consuming for-loops;
- Possibly integrate `wxPyRibbon` (still on development), at least on Windows.
License And Version
===================
AUI library is distributed under the wxPython license.
Latest revision: NAME @ 28 Apr 2010, 14.00 GMT
Version 1.2.
""" |
"""
# ggame
The simple cross-platform sprite and game platform for Brython Server (Pygame, Tkinter to follow?).
Ggame stands for a couple of things: "good game" (of course!) and also "git game" or "github game"
because it is designed to operate with [Brython Server](http://runpython.com) in concert with
Github as a backend file store.
Ggame is **not** intended to be a full-featured gaming API, with every bell and whistle. Ggame is
designed primarily as a tool for teaching computer programming, recognizing that the ability
to create engaging and interactive games is a powerful motivator for many programming students.
Accordingly, any functional or performance enhancements that *can* be reasonably implemented
by the user are left as an exercise.
## Functionality Goals
The ggame library is intended to be trivially easy to use. For example:
from ggame import App, ImageAsset, Sprite
# Create a displayed object at 100,100 using an image asset
Sprite(ImageAsset("ggame/bunny.png"), (100,100))
# Create the app, with a 500x500 pixel stage
app = App(500,500)
# Run the app
app.run()
## Overview
There are three major components to the `ggame` system: Assets, Sprites and the App.
### Assets
Asset objects (e.g. `ggame.ImageAsset`, etc.) typically represent separate files that
are provided by the "art department". These might be background images, user interface
images, or images that represent objects in the game. In addition, `ggame.SoundAsset`
is used to represent sound files (`.wav` or `.mp3` format) that can be played in the
game.
Ggame also extends the asset concept to include graphics that are generated dynamically
at run-time, such as geometrical objects, e.g. rectangles, lines, etc.
### Sprites
All of the visual aspects of the game are represented by instances of `ggame.Sprite` or
subclasses of it.
### App
Every ggame application must create a single instance of the `ggame.App` class (or
a sub-class of it). Creating an instance of the `ggame.App` class will initiate
creation of a pop-up window on your browser. Executing the app's `run` method will
begin the process of refreshing the visual assets on the screen.
### Events
No game is complete without a player, and players produce events. Your code handles user
input by registering to receive keyboard and mouse events using the `ggame.App.listenKeyEvent` and
`ggame.App.listenMouseEvent` methods.
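A minimal sketch of event registration (hedged: it assumes the
`listenKeyEvent(eventtype, key, callback)` calling convention with
"keydown"/"space" event and key names; the handler is illustrative):

    from ggame import App

    def spacebar(event):
        print("spacebar pressed")

    app = App(500, 500)
    # call spacebar() whenever the space key goes down
    app.listenKeyEvent("keydown", "space", spacebar)
    app.run()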
## Execution Environment
Ggame is designed to be executed in a web browser using [Brython](http://brython.info/),
[Pixi.js](http://www.pixijs.com/) and [Buzz](http://buzz.jaysalvat.com/). The easiest
way to do this is by executing from [runpython](http://runpython.com), with source
code residing on [github](http://github.com).
When using [runpython](http://runpython.com), you will have to configure your browser
to allow popup windows.
To use Ggame in your own application, you will minimally need to create a folder called
`ggame` in your project. Within `ggame`, copy the `ggame.py`, `sysdeps.py` and
`__init__.py` files from the [ggame project](https://github.com/BrythonServer/ggame).
### Include Ggame as a Git Subtree
From the same directory as your own python sources (note: you must have an existing git
repository with committed files in order for the following to work properly),
execute the following terminal commands:
git remote add -f ggame https://github.com/BrythonServer/ggame.git
git merge -s ours --no-commit ggame/master
mkdir ggame
git read-tree --prefix=ggame/ -u ggame/master
git commit -m "Merge ggame project as our subdirectory"
If you want to pull in updates from ggame in the future:
git pull -s subtree ggame master
You can see an example of how a ggame subtree is used by examining the
[Brython Server Spacewar](https://github.com/BrythonServer/Spacewar) repo on Github.
## Geometry
When referring to screen coordinates, note that the x-axis of the computer screen
is *horizontal* with the zero position on the left hand side of the screen. The
y-axis is *vertical* with the zero position at the **top** of the screen.
Increasing positive y-coordinates correspond to the downward direction on the
computer screen. Note that this is **different** from the way you may have learned
about x and y coordinates in math class!
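To make the convention concrete, a small sketch (hedged: it assumes the
run-time generated RectangleAsset, Color and LineStyle asset classes
mentioned under Assets):

    from ggame import App, Color, LineStyle, RectangleAsset, Sprite

    red = Color(0xff0000, 1.0)
    box = RectangleAsset(50, 50, LineStyle(1, red), red)
    Sprite(box, (0, 0))      # appears at the top-left corner of the stage
    Sprite(box, (0, 400))    # same x, larger y: further *down* the screen
    App(500, 500).run()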
""" |
""" A gallery script for pyblosxom
QUICK HOWTO
1. Create the following templates (with your flavor suffix): pygallery_index,
pygallery_thumbs, pygallery_image. The following template variables are
available:
* pygallery_index
- $body : contains the list of galleries
* pygallery_thumbs
- $title : contains the thumb group name (if any)
- $body : contains the list of thumbs
* pygallery_image
- $title: file name
- $image_url: the url to the image
- $next: the url to the next image
- $prev: the url to the previous image
- $summary: a little text you have written
2. The following variables must be set in your pyblosxom config.py file:
py["pygallery_dirpath"] = "/home/myaccount/www/gallery" #file system
py["pygallery_url"] = "http://myurl.com/gallery" #URL
The pygallery_dirpath configuration variable must point to a place in the
file system that can be read by your web server. The variable
pygallery_dirpath points to the root of your galleries. This root must
contain at least one directory (one gallery); however, it can contain several
directories (many galleries). Every gallery directory must contain one
directory that is called thumbs (the small images) and one that is called
images (the big images).
pygallery_url must be set to point to the root url of your galleries.
3. Create your first three galleries (summer2005, newyear2005 and summer2006)!
For example:
mkdir /home/myaccount/www/gallery
mkdir /home/myaccount/www/gallery/summer2005
mkdir /home/myaccount/www/gallery/summer2005/thumbs
mkdir /home/myaccount/www/gallery/summer2005/images
mkdir /home/myaccount/www/gallery/newyear2005
mkdir /home/myaccount/www/gallery/newyear2005/thumbs
mkdir /home/myaccount/www/gallery/newyear2005/images
mkdir /home/myaccount/www/gallery/summer2006
mkdir /home/myaccount/www/gallery/summer2006/thumbs
mkdir /home/myaccount/www/gallery/summer2006/images
Then copy your big pictures to the new images directories and the thumbs to
the thumbs directories. OBSERVE!! The thumbs and the images must have
matching names. So if you have a picture called mycomputer.jpg in the images
directory, you must have a mycomputer.jpg in the thumbs directory.
4. Test your gallery! For example:
http://myurl.com/gallery/cgi-bin/pyblosxom.cgi/pygallery/index
Observe that you must write pygallery at the end of the url.
5. pygallery has 3 levels. The first level is the galleries list, the second
level is the thumbs section and the third level is the big image. What if
you do not like, for example, the look of the gallery list items, or there
are 5 thumbs on a row and you want to use HTML table tags at the
beginning and the end of a thumb row? Well, then you should read the
CONFIGURATION OPTIONS section.
6. In the default gallery list the name used for each gallery is the
directory name you gave it (for example summer2005), and the list does not
contain any summary text or other fun stuff. However, this is fixed by
placing an index.cfg file in those galleries that need one. This is
described in the INDEX.CFG section.
7. If you want to sort your items in the galleries list, add the file dirlist.cfg
to the root of your galleries. Type in the directory names in the order you
want them to be listed, separated by new lines, and you have a new order. If
you have a dirlist.cfg file, only directories listed in the file will show up in
the galleries list. Here is an example:
touch /home/myaccount/www/gallery/dirlist.cfg
echo "summer2005" >> /home/myaccount/www/gallery/dirlist.cfg
echo "newyear2005" >> /home/myaccount/www/gallery/dirlist.cfg
If you have the directories summer2005, newyear2005 and summer2006, the above
example would mean that you only show the galleries summer2005 and newyear2005
in the list, and in that order.
8. The thumbs are not the only things that can be grouped! You can also group
galleries in the dirlist.cfg (to use this you need to add one more template,
called pygallery_group). To do that you just put a label enclosed in brackets
before the directory names that are in the same group, like this:
[Family]
summer2005
newyear2005
[Parties]
lanparty1
lanparty2
bithday2005
It is possible to use the ordinary index and the grouped index at the same time
because they use different url paths. For example:
* Ordinary index: http://myurl.com/gallery/cgi-bin/pyblosxom.cgi/pygallery/index
* Grouped index: http://myurl.com/gallery/cgi-bin/pyblosxom.cgi/pygallery/groups
In the ordinary index case the groups are ignored, but the order of the galleries
is still taken into consideration.
The url to the ordinary index ends with index, and the url to the grouped index
ends with groups. To only view the galleries in a group, the url must end with
the word group and the name of the group:
* http://myurl.com/gallery/cgi-bin/pyblosxom.cgi/pygallery/group/Family
Observe the difference between the url ending with groups (the group index) and
group (a single group).
* pygallery_group
- $body : contains the list of galleries
- $grp_name: contains the name of the group
- $grp_url: contains the url to the group
The template pygallery_group has two more template variables than
pygallery_index. It has been designed with flexibility in mind. For example, if you
only want to list the names of the groups and the url to the groups, skip the $body
template variable. If you want to list all groups and images then you probably do not
need the $grp_url variable (even if it is possible).
9. If you are interested in more customization, keep on reading.
CONFIGURATION OPTIONS
These are the configuration options for pyblosxom config.py
* pygallery_dirpath
- Already covered
* pygallery_url
- Already covered
* pygallery_use_grp
- You can group thumbs in a gallery with an index.cfg (True/False).
- Default: False
* pygallery_thumb_row_len
- How many thumbs will be on a row.
- Default: 5
* pygallery_index_item
- How you want the HTML of an item in the gallery list to look.
- Default: <a href="$gal_dir_relurl">$gal_dirname</a><br />
- Variables:
$gal_dirname: The name of the gallery
$gal_summary: Text (from index.cfg)
$gal_dir_relurl: The url to the gallery
$gal_thumb: A thumb images name (from index.cfg)
* pygallery_thumb_rbegin
- How you want your thumb row to begin.
- Default: <!-- row begin -->
* pygallery_thumb_rend
- How you want your thumb row to end.
- Default: <!-- row end --> <br />
* pygallery_thumb_item
- How you want the HTML of a thumb to look.
- Default: <a href="$gal_thumb_imgurl">
<img style="border-style: none;" src="$gal_thumb_url"
alt="$gal_thumb_filename"/>
</a> 
- Variables:
$gal_thumb_filename: The name of the file
$gal_thumb_url: The url to the thumb
$gal_thumb_imgurl: The url to the big picture
* pygallery_thumb_item_grp
- When the gallery ends with a row where the number of thumbs is less
than pygallery_thumb_row_len, you may want to replace the empty spots
with HTML table tags or another picture.
- Default: <img style="border-style: none;"
src="$gal_thumb_url" alt="$gal_thumb_filename"/> 
- Variables:
$gal_thumb_filename: The name of the file
$gal_thumb_url: The url to the group thumb. This image must be placed
in the gallery root (pygallery_dirpath) and have the
same name (+ the format suffix) as the thumb group it
will appear in (a group is defined in the index.cfg).
This can be used even if no groups are used, but then
the image must have the name thumbs (+ the format
suffix).
* pygallery_default_index_thumb
- An image used in the gallery list when none is found in an index.cfg.
The image must be placed in the gallery root (pygallery_dirpath).
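Putting a few of these options together, a config.py customization might look
like this (the values are illustrative):

    py["pygallery_dirpath"] = "/home/myaccount/www/gallery"
    py["pygallery_url"] = "http://myurl.com/gallery"
    py["pygallery_use_grp"] = True
    py["pygallery_thumb_row_len"] = 4
    py["pygallery_thumb_rbegin"] = "<tr>"
    py["pygallery_thumb_rend"] = "</tr>"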
INDEX.CFG
The config.py options are used to customize all the galleries globally. If
you want to customize things in a single gallery you must use the index.cfg file.
An index.cfg file can be placed in any gallery (for example one in summer2005 and
one in newyear2005). The data format follows RFC 822:
http://www.faqs.org/rfcs/rfc822.html
Here is an example that uses all the options of the index.cfg
[index]
name: The fun summer 2005
summary: Here we have a gallery with allot of sun and sand
thumb: picture2.jpg
[grouplinks]
picture1.jpg: landscape
picture2.jpg: portrait
picture3.jpg: portrait
picture4.jpg: landscape
[text]
picture1.jpg: NAME is skydiving
picture2.jpg: Oscar is playing mini golf
picture3.jpg: Oscar throws the golf club
picture4.jpg: NAME laugh at Oscar
Let's explain the file. The tags [index], [grouplinks], [text] must exist in
the file. Let's begin with the [index] part. The name variable contains the
value that will be put in the $gal_dirname variable. The summary variable
contains the value that will be put in the $gal_summary variable. The thumb
variable contains the value that will be put in the $gal_thumb variable.
$gal_dirname, $gal_summary and $gal_thumb are used in the config.py option
pygallery_index_item.
The [grouplinks] tag contains file names of thumb files in the thumbs
directory of a gallery. The text after the : char is the group name. In the
example above you get two groups. When a new group name appears in an index.cfg
file it is automatically added to the gallery that the index.cfg file
belongs to. I actually made this functionality because I wanted to separate
landscape oriented and portrait oriented pictures into two groups. I hate
mixing them!
The [text] tag contains file names of files in an images directory and the
corresponding text that will be shown with the picture.
There is also a program that copies and resizes images, creates thumbs and
creates index.cfg files. One interesting feature is that it creates groups
in the index.cfg file. It uses the orientation (landscape or portrait) and
size of the picture to create groups. This program is called apethumbgen.py
and is available from http://www.codeape.org/blog/static/download
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright 2006 NAME - (3 April, 2006) Created.
1.0 - (1 October, 2006) Finished =)
1.01 - (8 October, 2006) Fixed html bug in default data
1.2 - (15 February, 2007) Added a way to sort the galleries list (dirlist.cfg).
2.0 - (13 November, 2007) Added the group functionality and fixed a ton of bugs
2.01 - (13 November, 2007) Found a last-minute index out of bounds bug
""" |
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET, are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
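For example, a minimal threading echo service could look like this (a sketch
assuming the Python 3 module name socketserver; on Python 2 the module is
named SocketServer, and the port number here is arbitrary):

    import socketserver

    class EchoHandler(socketserver.StreamRequestHandler):
        def handle(self):
            # rfile/wfile are file-like objects wrapping the connection
            for line in self.rfile:
                self.wfile.write(line)

    if __name__ == '__main__':
        with socketserver.ThreadingTCPServer(('localhost', 9999), EchoHandler) as srv:
            srv.serve_forever()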
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that come in nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 NAME <EMAIL>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
""" |
"""
[2016-07-08] Challenge #274 [Hard] ∞ Loop solver
https://www.reddit.com/r/dailyprogrammer/comments/4rug59/20160708_challenge_274_hard_loop_solver/
#Description
∞ Loop is a mobile game that consists of n\*m tiles, placed in a n\*m grid. There are 16 different tiles:
┃, ━, ┏, ┓, ┛, ┗, ┣, ┳, ┫, ┻, ╋, ╹, ╺, ╻, ╸, and the empty tile.
(If some of the Unicode characters aren't shown, [here](http://i.imgur.com/PWyeW5r.png) is a screenshot of this
paragraph).
In other words, every tile may or may not have a "pipe" going up, a "pipe" going right, a "pipe" going down, and a
"pipe" going left. All combinations of those are valid, legal tiles.
At the beginning of the game, the grid is filled with those tiles. The player may then choose some tile and rotate it
90 degrees to the right. The player may do this an unlimited amount of times. For example, ┣ becomes ┳ and ┛ becomes ┗,
but ╋ stays ╋.
The objective is to create a closed loop: every pipe must have another pipe facing it in the adjacent tile — for
example if some tile has a pipe going right, its adjacent tile to the right must have a pipe going left.
In case you need clarification, [here's](https://www.youtube.com/watch?v=TlR1hfiIk10) some random guy playing it.
Your task is to write a program that, given an initial grid of tiles, outputs a solution to that grid.
#Formal Inputs & Outputs
An easy way to represent tiles without having to deal with Unicode (or ASCII art) is to use the bitmask technique to
encode the tiles as numbers 0...15.
To encode a tile:
* Start with 0.
* If the tile has a pipe going up, add 1.
* If the tile has a pipe going right, add 2.
* If the tile has a pipe going down, add 4.
* If the tile has a pipe going left, add 8.
For example, ┫ becomes 1+4+8=13.
If we look at the binary representation of that number, we see that:
* The first digit from the right shows whether the tile has a pipe going up;
* The second digit from the right shows whether the tile has a pipe going right;
* The third digit from the right shows whether the tile has a pipe going down;
* The fourth digit from the right shows whether the tile has a pipe going left.
13 in binary is 1101, from which it is evident that all pipes are present except the pipe going right.
##Input description
The input consists of n rows, each row having m space-separated numbers in it. Those numbers are the tiles, encoded in
the bitmask technique discussed above.
You may also include the number of rows and columns in the input, if that makes it easier to read the input.
##Output description
Output a similar grid which is obtained by rotating some or all tiles in the input grid. A tile may be rotated multiple
times. The output grid must be a closed loop.
##Sample input 1
9 12 12 6
10 13 13 5
3 3 9 3
##Sample output 1
6 12 6 12
5 7 13 5
3 9 3 9
The sample input corresponds to:
┛┓┓┏
━┫┫┃
┗┗┛┗
By rotating some tiles, we get:
┏┓┏┓
┃┣┫┃
┗┛┗┛,
which corresponds to the sample output and is a closed loop.
(Again, if Unicode characters don't load, [here](http://i.imgur.com/lqCGY3e.png) is the first sample input).
##Sample input 2
0 8 8 0
##Sample output 2
0 2 8 0
The input corresponds to ╸╸, surrounded by two empty tiles.
The corresponding output is ╺╸.
#Notes
It is easiest to use the bitwise and/or/xor operators to rotate and check for pipes. Most programming languages have
such operators. The bitwise shift operators may also be helpful to rotate the tiles.
[Here's](https://en.wikipedia.org/wiki/Mask_(computing\)) a Wikipedia article on using them on bitmasks.
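For instance, a 90-degree clockwise rotation is a single shift-and-mask (a
short sketch; the helper name is mine, not part of the challenge):

    def rotate_right(tile):
        # up->right, right->down, down->left, left->up (bits 1, 2, 4, 8)
        return ((tile << 1) | (tile >> 3)) & 0b1111

    assert rotate_right(0b0111) == 0b1110   # 7 (┣) becomes 14 (┳)
    assert rotate_right(0b1111) == 0b1111   # 15 (╋) stays 15 (╋)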
#Finally
This challenge was suggested by /u/A858DE57B86C2F16F, many thanks! Have a good challenge idea? Consider submitting it
to /r/dailyprogrammer_ideas
""" |
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
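Example (a minimal usage sketch; the section and option names here are
illustrative):

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read_string("[server]\nhost = localhost\nport = 8080\n")
    cfg.get("server", "host")       # 'localhost'
    cfg.getint("server", "port")    # 8080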
""" |
#
# tested on | Windows native | Linux cross-compilation
# ------------------------+-------------------+---------------------------
# MSVS C++ 2010 Express | WORKS | n/a
# Mingw-w64 | WORKS | WORKS
# Mingw-w32 | WORKS | WORKS
# MinGW | WORKS | untested
#
#####
# Notes about MSVS C++ :
#
# - MSVC2010-Express compiles to 32bits only.
#
#####
# Notes about Mingw-w64 and Mingw-w32 under Windows :
#
# - both can be installed using the official installer :
# http://mingw-w64.sourceforge.net/download.php#mingw-builds
#
# - if you want to compile both 32bits and 64bits, don't forget to
# run the installer twice to install them both.
#
# - install them into a path that does not contain spaces
# ( example : "C:/Mingw-w32", "C:/Mingw-w64" )
#
# - if you want to compile faster using the "-j" option, don't forget
# to install the appropriate version of the Pywin32 python extension
# available from : http://sourceforge.net/projects/pywin32/files/
#
# - before running scons, you must add into the environment path
# the path to the "/bin" directory of the Mingw version you want
# to use :
#
# set PATH=C:/Mingw-w32/bin;%PATH%
#
# - then, scons should be able to detect gcc.
# - Mingw-w32 only compiles 32bits.
# - Mingw-w64 only compiles 64bits.
#
# - it is possible to add them both at the same time into the PATH env,
# if you also define the MINGW32_PREFIX and MINGW64_PREFIX environment
# variables.
# For instance, you could store that set of commands into a .bat script
# that you would run just before scons :
#
# set PATH=C:\mingw-w32\bin;%PATH%
# set PATH=C:\mingw-w64\bin;%PATH%
# set MINGW32_PREFIX=C:\mingw-w32\bin\
# set MINGW64_PREFIX=C:\mingw-w64\bin\
#
#####
# Notes about Mingw, Mingw-w64 and Mingw-w32 under Linux :
#
# - default toolchain prefixes are :
# "i586-mingw32msvc-" for MinGW
# "i686-w64-mingw32-" for Mingw-w32
# "x86_64-w64-mingw32-" for Mingw-w64
#
# - if both MinGW and Mingw-w32 are installed on your system,
#    Mingw-w32 should take priority over MinGW.
#
# - it is possible to manually override prefixes by defining
# the MINGW32_PREFIX and MINGW64_PREFIX environment variables.
#
#####
# Notes about Mingw under Windows :
#
# - this is the MinGW version from http://mingw.org/
# - install it into a path that does not contain spaces
# ( example : "C:/MinGW" )
# - several DirectX headers might be missing. You can copy them into
#    the "C:/MinGW/include" directory from this page :
#    https://code.google.com/p/mingw-lib/source/browse/trunk/working/avcodec_to_widget_5/directx_include/
# - before running scons, add the path to the "/bin" directory :
# set PATH=C:/MinGW/bin;%PATH%
# - scons should be able to detect gcc.
#
#####
# TODO :
#
# - finish cleaning up this script to remove all the remnants of previous hacks and workarounds
# - make it work with the Windows7 SDK that is supposed to enable 64bits compilation for MSVC2010-Express
# - confirm it works well with other Visual Studio versions.
# - update the wiki about the pywin32 extension required for the "-j" option under Windows.
# - update the wiki to document MINGW32_PREFIX and MINGW64_PREFIX
#
|
"""
=====================================================
Optimization and root finding (:mod:`scipy.optimize`)
=====================================================
.. currentmodule:: scipy.optimize
Optimization
============
Local Optimization
------------------
.. autosummary::
:toctree: generated/
minimize - Unified interface for minimizers of multivariate functions
minimize_scalar - Unified interface for minimizers of univariate functions
OptimizeResult - The optimization result returned by some optimizers
OptimizeWarning - The optimization encountered problems
The `minimize` function supports the following methods:
.. toctree::
optimize.minimize-neldermead
optimize.minimize-powell
optimize.minimize-cg
optimize.minimize-bfgs
optimize.minimize-newtoncg
optimize.minimize-lbfgsb
optimize.minimize-tnc
optimize.minimize-cobyla
optimize.minimize-slsqp
optimize.minimize-dogleg
optimize.minimize-trustncg
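For example, minimizing the Rosenbrock function (listed in the Rosenbrock
section below) takes a single call (a usage sketch, not part of the reference
listing)::

    >>> from scipy.optimize import minimize, rosen
    >>> res = minimize(rosen, [1.3, 0.7, 0.8], method='Nelder-Mead')
    >>> res.success
    True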
The `minimize_scalar` function supports the following methods:
.. toctree::
optimize.minimize_scalar-brent
optimize.minimize_scalar-bounded
optimize.minimize_scalar-golden
The specific optimization method interfaces below in this subsection are
not recommended for use in new scripts; all of these methods are accessible
via a newer, more consistent interface provided by the functions above.
General-purpose multivariate methods:
.. autosummary::
:toctree: generated/
fmin - Nelder-Mead Simplex algorithm
fmin_powell - Powell's (modified) level set method
fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm
fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)
fmin_ncg - Line-search Newton Conjugate Gradient
Constrained multivariate methods:
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer
fmin_tnc - Truncated Newton code
fmin_cobyla - Constrained optimization by linear approximation
fmin_slsqp - Minimization using sequential least-squares programming
differential_evolution - stochastic minimization using differential evolution
Univariate (scalar) minimization methods:
.. autosummary::
:toctree: generated/
fminbound - Bounded minimization of a scalar function
brent - 1-D function minimization using Brent method
golden - 1-D function minimization using Golden Section method
Equation (Local) Minimizers
---------------------------
.. autosummary::
:toctree: generated/
leastsq - Minimize the sum of squares of M equations in N unknowns
nnls - Linear least-squares problem with non-negativity constraint
Global Optimization
-------------------
.. autosummary::
:toctree: generated/
basinhopping - Basinhopping stochastic optimizer
brute - Brute force searching optimizer
differential_evolution - stochastic minimization using differential evolution
Rosenbrock function
-------------------
.. autosummary::
:toctree: generated/
rosen - The Rosenbrock function.
rosen_der - The derivative of the Rosenbrock function.
rosen_hess - The Hessian matrix of the Rosenbrock function.
rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
Fitting
=======
.. autosummary::
:toctree: generated/
curve_fit -- Fit curve to a set of points
Root finding
============
Scalar functions
----------------
.. autosummary::
:toctree: generated/
brentq - quadratic interpolation Brent method
brenth - Brent method, modified by Harris with hyperbolic extrapolation
ridder - Ridder's method
bisect - Bisection method
newton - Secant method or Newton's method
Fixed point finding:
.. autosummary::
:toctree: generated/
fixed_point - Single-variable fixed-point solver
Multidimensional
----------------
General nonlinear solvers:
.. autosummary::
:toctree: generated/
root - Unified interface for nonlinear solvers of multivariate functions
fsolve - Non-linear multi-variable equation solver
broyden1 - Broyden's first method
broyden2 - Broyden's second method
The `root` function supports the following methods:
.. toctree::
optimize.root-hybr
optimize.root-lm
optimize.root-broyden1
optimize.root-broyden2
optimize.root-anderson
optimize.root-linearmixing
optimize.root-diagbroyden
optimize.root-excitingmixing
optimize.root-krylov
optimize.root-dfsane
Large-scale nonlinear solvers:
.. autosummary::
:toctree: generated/
newton_krylov
anderson
Simple iterations:
.. autosummary::
:toctree: generated/
excitingmixing
linearmixing
diagbroyden
:mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>`
Linear Programming
==================
Simplex Algorithm:
.. autosummary::
:toctree: generated/
linprog -- Linear programming using the simplex algorithm
linprog_verbose_callback -- Sample callback function for linprog
The `linprog` function supports the following methods:
.. toctree::
optimize.linprog-simplex
Utilities
=========
.. autosummary::
:toctree: generated/
approx_fprime - Approximate the gradient of a scalar function
bracket - Bracket a minimum, given two starting points
check_grad - Check the supplied derivative using finite differences
line_search - Return a step that satisfies the strong Wolfe conditions
show_options - Show specific options for optimization solvers
LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian
""" |
"""Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
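A minimal sketch of such a binding (using the Python 2 module names this file
was written for; the DraggedObject class is illustrative):

    import Tkinter
    import Tkdnd

    class DraggedObject:
        def dnd_end(self, target, event):
            pass    # notified here when the drag-and-drop process ends

    def on_press(event):
        Tkdnd.dnd_start(DraggedObject(), event)

    root = Tkinter.Tk()
    label = Tkinter.Label(root, text='drag me')
    label.pack()
    label.bind('<ButtonPress>', on_press)
    root.mainloop()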
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. when different button callbacks all call
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
""" |
"""
=============
Miscellaneous
=============
IEEE 754 Floating Point Special Values
--------------------------------------
Special values defined in numpy: nan, inf.
NaNs can be used as a poor man's mask (if you don't care what the
original value was).
Note: cannot use equality to test NaNs. E.g.: ::
>>> myarr = np.array([1., 0., np.nan, 3.])
>>> np.where(myarr == np.nan)
>>> np.nan == np.nan # is always False! Use special numpy functions instead.
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
>>> myarr
array([ 1., 0., NaN, 3.])
>>> myarr[np.isnan(myarr)] = 0. # use this instead
>>> myarr
array([ 1., 0., 0., 3.])
Other related special value functions: ::
isinf(): True if value is inf
isfinite(): True if not nan or inf
nan_to_num(): Map nan to 0, inf to max float, -inf to min float
The following corresponds to the usual functions except that nans are excluded
from the results: ::
nansum()
nanmax()
nanmin()
nanargmax()
nanargmin()
>>> x = np.arange(10.)
>>> x[3] = np.nan
>>> x.sum()
nan
>>> np.nansum(x)
42.0
How numpy handles numerical exceptions
--------------------------------------
The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
set individually for different kinds of exceptions. The different behaviors
are:
- 'ignore' : Take no action when the exception occurs.
- 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
- 'raise' : Raise a `FloatingPointError`.
- 'call' : Call a function specified using the `seterrcall` function.
- 'print' : Print a warning directly to ``stdout``.
- 'log' : Record error in a Log object specified by `seterrcall`.
These behaviors can be set for all kinds of errors or specific ones:
- all : apply to all numeric exceptions
- invalid : when NaNs are generated
- divide : divide by zero (for integers as well!)
- overflow : floating point overflows
- underflow : floating point underflows
Note that integer divide-by-zero is handled by the same machinery.
These behaviors are set on a per-thread basis.
Examples
--------
::
>>> oldsettings = np.seterr(all='warn')
>>> np.zeros(5,dtype=np.float32)/0.
invalid value encountered in divide
>>> j = np.seterr(under='ignore')
>>> np.array([1.e-100])**10
>>> j = np.seterr(invalid='raise')
>>> np.sqrt(np.array([-1.]))
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
... print "saw stupid error!"
>>> np.seterrcall(errorhandler)
<function errorhandler at 0x...>
>>> j = np.seterr(all='call')
>>> np.zeros(5, dtype=np.int32)/0
FloatingPointError: invalid value encountered in divide
saw stupid error!
>>> j = np.seterr(**oldsettings) # restore previous
... # error-handling settings
Interfacing to C
----------------
Only a survey of the choices. Little detail on how each works.
1) Bare metal, wrap your own C-code manually.
- Plusses:
- Efficient
- No dependencies on other tools
- Minuses:
- Lots of learning overhead:
- need to learn basics of Python C API
- need to learn basics of numpy C API
- need to learn how to handle reference counting and love it.
- Reference counting often difficult to get right.
- getting it wrong leads to memory leaks, and worse, segfaults
- API will change for Python 3.0!
2) Cython
- Plusses:
- avoid learning C API's
- no dealing with reference counting
- can code in pseudo python and generate C code
- can also interface to existing C code
- should shield you from changes to Python C api
- has become the de-facto standard within the scientific Python community
- fast indexing support for arrays
- Minuses:
- Can write code in non-standard form which may become obsolete
- Not as flexible as manual wrapping
3) ctypes
- Plusses:
- part of Python standard library
- good for interfacing to existing sharable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
attribute: ::
a.ctypes.data a.ctypes.get_strides
a.ctypes.data_as a.ctypes.shape
a.ctypes.get_as_parameter a.ctypes.shape_as
a.ctypes.get_data a.ctypes.strides
a.ctypes.get_shape a.ctypes.strides_as
- Minuses:
- can't use for writing code to be turned into C extensions, only a wrapper
tool.
4) SWIG (automatic wrapper generator)
- Plusses:
- around a long time
- multiple scripting language support
- C++ support
- Good for wrapping large (many functions) existing C libraries
- Minuses:
- generates lots of code between Python and the C code
- can cause performance problems that are nearly impossible to optimize
out
- interface files can be hard to write
- doesn't necessarily avoid reference counting issues or needing to know
API's
5) scipy.weave
- Plusses:
- can turn many numpy expressions into C code
- dynamic compiling and loading of generated C code
- can embed pure C code in Python module and have weave extract, generate
interfaces and compile, etc.
- Minuses:
- Future very uncertain: it's the only part of Scipy not ported to Python 3
and is effectively deprecated in favor of Cython.
6) Psyco
- Plusses:
- Turns pure python into efficient machine code through jit-like
optimizations
- very fast when it optimizes well
- Minuses:
- Only on intel (windows?)
- Doesn't do much for numpy?
Interfacing to Fortran:
-----------------------
The clear choice to wrap Fortran code is
`f2py <http://docs.scipy.org/doc/numpy-dev/f2py/>`_.
Pyfort is an older alternative, but not supported any longer.
Fwrap is a newer project that looked promising but isn't being developed any
longer.
Interfacing to C++:
-------------------
1) Cython
2) CXX
3) Boost.python
4) SWIG
5) SIP (used mainly in PyQT)
""" |
"""
=====================================================
Optimization and Root Finding (:mod:`scipy.optimize`)
=====================================================
.. currentmodule:: scipy.optimize
SciPy ``optimize`` provides functions for minimizing (or maximizing)
objective functions, possibly subject to constraints. It includes
solvers for nonlinear problems (with support for both local and global
optimization algorithms), linear programming, constrained
and nonlinear least-squares, root finding and curve fitting.
Common functions and objects, shared across different solvers, are:
.. autosummary::
:toctree: generated/
show_options - Show specific options for optimization solvers.
OptimizeResult - The optimization result returned by some optimizers.
OptimizeWarning - The optimization encountered problems.
Optimization
============
Scalar Functions Optimization
-----------------------------
.. autosummary::
:toctree: generated/
minimize_scalar - Interface for minimizers of univariate functions
The `minimize_scalar` function supports the following methods:
.. toctree::
optimize.minimize_scalar-brent
optimize.minimize_scalar-bounded
optimize.minimize_scalar-golden
Local (Multivariate) Optimization
---------------------------------
.. autosummary::
:toctree: generated/
minimize - Interface for minimizers of multivariate functions.
The `minimize` function supports the following methods:
.. toctree::
optimize.minimize-neldermead
optimize.minimize-powell
optimize.minimize-cg
optimize.minimize-bfgs
optimize.minimize-newtoncg
optimize.minimize-lbfgsb
optimize.minimize-tnc
optimize.minimize-cobyla
optimize.minimize-slsqp
optimize.minimize-trustconstr
optimize.minimize-dogleg
optimize.minimize-trustncg
optimize.minimize-trustkrylov
optimize.minimize-trustexact
Constraints are passed to `minimize` function as a single object or
as a list of objects from the following classes:
.. autosummary::
:toctree: generated/
NonlinearConstraint - Class defining general nonlinear constraints.
LinearConstraint - Class defining general linear constraints.
Simple bound constraints are handled separately and there is a special class
for them:
.. autosummary::
:toctree: generated/
Bounds - Bound constraints.
Quasi-Newton strategies implementing `HessianUpdateStrategy`
interface can be used to approximate the Hessian in `minimize`
function (available only for the 'trust-constr' method). Available
quasi-Newton methods implementing this interface are:
.. autosummary::
:toctree: generated/
BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
SR1 - Symmetric-rank-1 Hessian update strategy.
Global Optimization
-------------------
.. autosummary::
:toctree: generated/
basinhopping - Basinhopping stochastic optimizer.
brute - Brute force searching optimizer.
differential_evolution - stochastic minimization using differential evolution.
shgo - simplicial homology global optimisation
dual_annealing - Dual annealing stochastic optimizer.
Least-squares and Curve Fitting
===============================
Nonlinear Least-Squares
-----------------------
.. autosummary::
:toctree: generated/
least_squares - Solve a nonlinear least-squares problem with bounds on the variables.
Linear Least-Squares
--------------------
.. autosummary::
:toctree: generated/
nnls - Linear least-squares problem with non-negativity constraint.
lsq_linear - Linear least-squares problem with bound constraints.
Curve Fitting
-------------
.. autosummary::
:toctree: generated/
curve_fit -- Fit curve to a set of points.
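A short usage sketch (the model function and noise level are illustrative)::

    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def model(x, a, b):
    ...     return a * np.exp(-b * x)
    >>> xdata = np.linspace(0, 4, 50)
    >>> ydata = model(xdata, 2.5, 1.3) + 0.05 * np.random.normal(size=xdata.size)
    >>> popt, pcov = curve_fit(model, xdata, ydata)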
Root finding
============
Scalar functions
----------------
.. autosummary::
:toctree: generated/
root_scalar - Unified interface for nonlinear solvers of scalar functions.
brentq - quadratic interpolation Brent method.
brenth - Brent method, modified by Harris with hyperbolic extrapolation.
ridder - Ridder's method.
bisect - Bisection method.
newton - Newton's method (also Secant and Halley's methods).
toms748 - Alefeld, Potra & Shi Algorithm 748
RootResults - The root finding result returned by some root finders.
The `root_scalar` function supports the following methods:
.. toctree::
optimize.root_scalar-brentq
optimize.root_scalar-brenth
optimize.root_scalar-bisect
optimize.root_scalar-ridder
optimize.root_scalar-newton
optimize.root_scalar-toms748
optimize.root_scalar-secant
optimize.root_scalar-halley
The table below lists situations and appropriate methods, along with
*asymptotic* convergence rates per iteration (and per function evaluation)
for successful convergence to a simple root(*).
Bisection is the slowest of them all, adding one bit of accuracy for each
function evaluation, but is guaranteed to converge.
The other bracketing methods all (eventually) increase the number of accurate
bits by about 50% for every function evaluation.
The derivative-based methods, all built on `newton`, can converge quite quickly
if the initial value is close to the root. They can also be applied to
functions defined on (a subset of) the complex plane.
+-------------+----------+----------+-----------+-------------+-------------+----------------+
| Domain of f | Bracket? | Derivatives? | Solvers | Convergence |
+ + +----------+-----------+ +-------------+----------------+
| | | `fprime` | `fprime2` | | Guaranteed? | Rate(s)(*) |
+=============+==========+==========+===========+=============+=============+================+
| `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 "Linear" |
| | | | | - brentq | - Yes | - >=1, <= 1.62 |
| | | | | - brenth | - Yes | - >=1, <= 1.62 |
| | | | | - ridder | - Yes | - 2.0 (1.41) |
| | | | | - toms748 | - Yes | - 2.7 (1.65) |
+-------------+----------+----------+-----------+-------------+-------------+----------------+
| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) |
+-------------+----------+----------+-----------+-------------+-------------+----------------+
| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) |
+-------------+----------+----------+-----------+-------------+-------------+----------------+
| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) |
+-------------+----------+----------+-----------+-------------+-------------+----------------+
.. seealso::
`scipy.optimize.cython_optimize` -- Typed Cython versions of zeros functions
Fixed point finding:
.. autosummary::
:toctree: generated/
fixed_point - Single-variable fixed-point solver.
Multidimensional
----------------
.. autosummary::
:toctree: generated/
root - Unified interface for nonlinear solvers of multivariate functions.
The `root` function supports the following methods:
.. toctree::
optimize.root-hybr
optimize.root-lm
optimize.root-broyden1
optimize.root-broyden2
optimize.root-anderson
optimize.root-linearmixing
optimize.root-diagbroyden
optimize.root-excitingmixing
optimize.root-krylov
optimize.root-dfsane
Linear Programming
==================
.. autosummary::
:toctree: generated/
linprog -- Unified interface for minimizers of linear programming problems.
The `linprog` function supports the following methods:
.. toctree::
optimize.linprog-simplex
optimize.linprog-interior-point
optimize.linprog-revised_simplex
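A short usage sketch (the toy problem is illustrative)::

    >>> from scipy.optimize import linprog
    >>> # minimize -x - 2*y  subject to  x + y <= 4,  x >= 0,  y >= 0
    >>> res = linprog(c=[-1, -2], A_ub=[[1, 1]], b_ub=[4],
    ...               bounds=[(0, None), (0, None)])
    >>> res.x                      # array([0., 4.]), objective value -8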
The simplex method supports callback functions, such as:
.. autosummary::
:toctree: generated/
linprog_verbose_callback -- Sample callback function for linprog (simplex).
Assignment problems:
.. autosummary::
:toctree: generated/
linear_sum_assignment -- Solves the linear-sum assignment problem.
Utilities
=========
Finite-Difference Approximation
-------------------------------
.. autosummary::
:toctree: generated/
approx_fprime - Approximate the gradient of a scalar function.
check_grad - Check the supplied derivative using finite differences.
Line Search
-----------
.. autosummary::
:toctree: generated/
bracket - Bracket a minimum, given two starting points.
line_search - Return a step that satisfies the strong Wolfe conditions.
Hessian Approximation
---------------------
.. autosummary::
:toctree: generated/
LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.
HessianUpdateStrategy - Interface for implementing Hessian update strategies
Benchmark Problems
------------------
.. autosummary::
:toctree: generated/
rosen - The Rosenbrock function.
rosen_der - The derivative of the Rosenbrock function.
rosen_hess - The Hessian matrix of the Rosenbrock function.
rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
Legacy Functions
================
The functions below are not recommended for use in new scripts;
all of these methods are accessible via newer, more consistent
interfaces provided by the functions above.
Optimization
------------
General-purpose multivariate methods:
.. autosummary::
:toctree: generated/
fmin - Nelder-Mead Simplex algorithm.
fmin_powell - Powell's (modified) level set method.
fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.
fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno).
fmin_ncg - Line-search Newton Conjugate Gradient.
Constrained multivariate methods:
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.
fmin_tnc - Truncated Newton code.
fmin_cobyla - Constrained optimization by linear approximation.
fmin_slsqp - Minimization using sequential least-squares programming.
Univariate (scalar) minimization methods:
.. autosummary::
:toctree: generated/
fminbound - Bounded minimization of a scalar function.
brent - 1-D function minimization using Brent method.
golden - 1-D function minimization using Golden Section method.
Least-Squares
-------------
.. autosummary::
:toctree: generated/
leastsq - Minimize the sum of squares of M equations in N unknowns.
Root Finding
------------
General nonlinear solvers:
.. autosummary::
:toctree: generated/
fsolve - Non-linear multi-variable equation solver.
broyden1 - Broyden's first method.
broyden2 - Broyden's second method.
Large-scale nonlinear solvers:
.. autosummary::
:toctree: generated/
newton_krylov
anderson
Simple iteration solvers:
.. autosummary::
:toctree: generated/
excitingmixing
linearmixing
diagbroyden
:mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>`
""" |
"""
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaN's with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
numpy.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze Return an array with length-one dimensions removed.
atleast_1d Force arrays to be > 1D
atleast_2d Force arrays to be > 2D
atleast_3d Force arrays to be > 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
stack Stack arrays along a new axis
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
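For example, with poly1d (a short usage sketch)::

  >>> import numpy as np
  >>> p = np.poly1d([1, -3, 2])          # represents x**2 - 3*x + 2
  >>> p.roots                            # array([2., 1.])
  >>> np.polyval([1, -3, 2], 0)          # evaluates the polynomial at 0 -> 2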
Iterators
---------
================ ===================
Arrayterator A buffered iterator for big arrays.
================ ===================
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
1D Array Set Operations
-----------------------
Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
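A short sketch of the set routines in action::

  >>> import numpy as np
  >>> np.unique([1, 1, 2, 2, 3, 3])               # array([1, 2, 3])
  >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])  # array([1, 3])
  >>> np.in1d([2, 4], [1, 2, 3])                  # array([ True, False])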
""" |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), NAME <EMAIL>, 2012-2013
# Copyright (c), NAME <EMAIL>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
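# A hedged usage sketch (not from this module's history): calling a remote
# method through the client interface described above. The endpoint URL and
# the method name are placeholders.
#
#   import xmlrpclib
#   server = xmlrpclib.ServerProxy("http://localhost:8000/RPC2")
#   result = server.some_method(42)  # hypothetical remote method
#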
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from NAME)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from NAME)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from NAME)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from NAME)
# 2002-03-17 fl Avoid buffered read when possible (from NAME)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from NAME)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from NAME)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
# 2014-12-02 ch/doko Add workaround for gzip bomb vulnerability
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by NAME Lundh.
#
# EMAIL http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by NAME Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# SKR04
# =====
# This module provides a German chart of accounts based on SKR04.
# Under the current settings, the company is not subject to VAT, i.e. by
# default there is no mapping of products and general ledger accounts to
# tax codes.
# This default is very easy to change and generally requires an initial
# assignment of tax codes to products and/or general ledger accounts or
# to partners.
# The sales taxes (full rate, reduced rate and tax-exempt) should be
# stored in the product master data, depending on the applicable tax
# regulations. The assignment is made on the Accounting tab
# (category: sales tax).
# The input taxes (full rate, reduced rate and tax-exempt) should
# likewise be stored in the product master data, depending on the
# applicable tax regulations. The assignment is made on the Accounting
# tab (category: input tax).
# The taxes for imports from and exports to EU countries, as well as
# for purchases from and sales to third countries, should be stored on
# the partner (supplier/customer), depending on the supplier's/customer's
# country of origin. The assignment on the customer takes precedence
# over the assignment on products and overrides it in individual cases.
#
# To simplify tax reporting and posting for foreign transactions, Odoo
# allows a general mapping of tax codes and tax accounts (e.g. mapping
# 'sales tax 19%' to 'tax-exempt imports from the EU') so that this
# mapping can be assigned to the foreign partner (customer/supplier).
# Posting a purchase invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the input tax base amount (e.g. input tax base amount,
# full rate 19%).
# The tax amount appears under the category 'input taxes' (e.g. input
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated and then output in the form of a report.
#
# Posting a sales invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the sales tax base amount (e.g. sales tax base amount,
# full rate 19%).
# The tax amount appears under the category 'sales tax' (e.g. sales
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated.
# The assigned tax codes can be reviewed at the level of the individual
# invoice (incoming and outgoing) and adjusted there if necessary.
# Credit notes lead to a correction (offsetting entry) of the tax
# posting, in the form of a mirror-image posting.
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# SKR03
# =====
# This module provides a German chart of accounts based on SKR03.
# Under the current settings, the company is not subject to VAT.
# This default is very easy to change and generally requires an initial
# assignment of tax accounts to products and/or general ledger accounts
# or to partners.
# The sales taxes (full rate, reduced rate and tax-exempt) should be
# stored in the product master data, depending on the applicable tax
# regulations. The assignment is made on the Accounting tab
# (category: sales tax).
# The input taxes (full rate, reduced rate and tax-exempt) should
# likewise be stored in the product master data, depending on the
# applicable tax regulations. The assignment is made on the Accounting
# tab (category: input tax).
# The taxes for imports from and exports to EU countries, as well as
# for purchases from and sales to third countries, should be stored on
# the partner (supplier/customer), depending on the supplier's/customer's
# country of origin. The assignment on the customer takes precedence
# over the assignment on products and overrides it in individual cases.
#
# To simplify tax reporting and posting for foreign transactions,
# OpenERP allows a general mapping of tax codes and tax accounts
# (e.g. mapping 'sales tax 19%' to 'tax-exempt imports from the EU')
# so that this mapping can be assigned to the foreign partner
# (customer/supplier).
# Posting a purchase invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the input tax base amount (e.g. input tax base amount,
# full rate 19%).
# The tax amount appears under the category 'input taxes' (e.g. input
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated and then output in the form of a report.
#
# Posting a sales invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the sales tax base amount (e.g. sales tax base amount,
# full rate 19%).
# The tax amount appears under the category 'sales tax' (e.g. sales
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated.
# The assigned tax codes can be reviewed at the level of the individual
# invoice (incoming and outgoing) and adjusted there if necessary.
# Credit notes lead to a correction (offsetting entry) of the tax
# posting, in the form of a mirror-image posting.
# SKR04
# =====
# This module provides a German chart of accounts based on SKR04.
# Under the current settings, the company is not subject to VAT, i.e. by
# default there is no mapping of products and general ledger accounts to
# tax codes.
# This default is very easy to change and generally requires an initial
# assignment of tax codes to products and/or general ledger accounts or
# to partners.
# The sales taxes (full rate, reduced rate and tax-exempt) should be
# stored in the product master data, depending on the applicable tax
# regulations. The assignment is made on the Accounting tab
# (category: sales tax).
# The input taxes (full rate, reduced rate and tax-exempt) should
# likewise be stored in the product master data, depending on the
# applicable tax regulations. The assignment is made on the Accounting
# tab (category: input tax).
# The taxes for imports from and exports to EU countries, as well as
# for purchases from and sales to third countries, should be stored on
# the partner (supplier/customer), depending on the supplier's/customer's
# country of origin. The assignment on the customer takes precedence
# over the assignment on products and overrides it in individual cases.
#
# To simplify tax reporting and posting for foreign transactions,
# OpenERP allows a general mapping of tax codes and tax accounts
# (e.g. mapping 'sales tax 19%' to 'tax-exempt imports from the EU')
# so that this mapping can be assigned to the foreign partner
# (customer/supplier).
# Posting a purchase invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the input tax base amount (e.g. input tax base amount,
# full rate 19%).
# The tax amount appears under the category 'input taxes' (e.g. input
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated and then output in the form of a report.
#
# Posting a sales invoice has the following effect:
# The tax base (excluding tax) is reported under the respective
# categories for the sales tax base amount (e.g. sales tax base amount,
# full rate 19%).
# The tax amount appears under the category 'sales tax' (e.g. sales
# tax 19%). Multidimensional hierarchies allow different positions to
# be aggregated.
# The assigned tax codes can be reviewed at the level of the individual
# invoice (incoming and outgoing) and adjusted there if necessary.
# Credit notes lead to a correction (offsetting entry) of the tax
# posting, in the form of a mirror-image posting.
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
""" |
#!/usr/bin/env python
#
# History:
# 2014-Dec-09 DG
# Started this history log. The PCapture window was slowly taking longer
# and longer to refresh, as more plots were added. Now explicitly clears
# the plot before plotting a new one.
# 2014-Dec-13 DG
# Added text and highlight if ND is on.
# 2015-May-02 DG
# The antenna MJD is suddenly glitching on occasion to very large values
# and exceeding the datetime.timedelta limit in datime(). It now can get
# no larger than today's MJD (in Communications section). Also made a lot
# of style edits to adhere to PEP8 style guide.
# 2015-May-29 DG
# Converted from using datime() to using Time() based on astropy.
# 2015-Jun-25 DG
# Now that Ant13's solar power station is online, changed SolPwr to display
# data from both.
# 2015-Jul-24 LK
# Modified to support displaying stateframe data that are elements of arrays.
# 2015-Jul-25 DG
# Changed ROACH Status output to show X and Y delays separately.
# 2015-Aug-27 DG
# Changed order of antennas in Pointing section to allow separation of Az-El
# and RA-Dec headings
# 2015-Sep-11 DG
# Attempt to eliminate crashes due to bad values in tracking modes by using
# np.clip() on PowerSwitch, RunControl, RunMode, DataMode.
# 2015-Sep-16 DG
# Add code to make the "AT STOW" notification work for the old antennas (9-11 and 13).
# Also changed "yellow" tracking warning limit to 0.005 degrees.
# 2015-Oct-13 DG
# Added CryoRX tab to display cryoreceiver part of stateframe.
# 2015-Oct-14 DG
# Adjustments to CryoRX tab.
# 2015-Oct-18 DG
# Added FEMB to CryoRX tab, and cleaned up code a bit.
# 2015-Oct-28 DG
# Added display of Local Sidereal Time, and fixed typo in Antenna tab red labels.
# 2015-Nov-21 DG
# Sort saved-plot filenames.
# 2015-Nov-29 DG
# Squashed some bugs where a timestamp or mjd of 0 was being converted to a Time()
# object, which resulted in an annoying warning message.
# 2015-Dec-01 DG
# Reduce font size of listboxes when the screen height is small.
# 2015-Dec-19 DG
# Changed LNA output to print LNA name instead of number.
# 2015-Dec-30 DG
# Changed Outlet heading to include outlet number.
# 2016-Jan-15 DG
# Update expected STOW position for 27-m antennas to +20 Dec
# 2016-Mar-02 DG
# Added code to read and display last CRIO command (and error)
# 2016-Mar-17 DG
# Changed display of FEM voltage to FEM power
# 2017-Jan-18 DG
# Added ant 14 tracking and receiver information to CryoRX display page
# 2017-Feb-09 DG
# Changed antlist to remove ant 15, which is no longer planned to be used.
# Also removed ant 15 (index 14) from definition of altantindex in update_display()
# Also expanded space for FSeqFile display, to allow for longer FSEQ filenames
# 2018-Jan-10 DG
# Added display of control room temperature, with red background if greater than 85 F
# 2018-Aug-25 DG
# Added remaining antennas to temperature plot, and cleaned up the code. Also added
# Cryo-temperature (second stage) to front page, and fixed color coding to be red only
# if temperature is out of range. Also changed startup page size and opened Temperature
# tab on startup.
# 2018-Nov-17 DG
# Fixed some deprecated function calls to call the replacement routines
# 2019-Jan-16 DG
# Added indication of solar power not updating.
# 2019-Feb-23 DG
# Fixed some annoying string display problems that were not there for earlier version
# of Tkinter. Also finally killed the old "pcapture" tab, which had not been used in
# forever.
# 2019-Nov-22 DG
# Added red (error) color to LO1A Sweep Status
# 2020-Apr-23 OG
# Added collapsable section for Antenna Last Command, increased the starting height
# of the opening window to 950 pixels, and set ROACH status to be expanded by default
# 2020-May-02 DG
# Cleaned up some garbage in the "Task" string
# 2020-Jun-05
# Added yellow (warning) to power and attenuation if the first attenuator on H or V is non-zero.
# 2020-Nov-13 OG
# Added red (error) to power and attenuation if any Power reading reports NaN.
# 2021-Jan-30 DG
# Add error case for writing stateframe log to disk--if write fails it just prints
# an error message to the screen.
# 2021-Feb-11 OG
# Added section to display most recent IDB file. Will be highlighted in
# red if older than 15 minutes.
# Rackspace Monitoring as a Service (MaaS) Alarm Language parser.
# Generated by the Waxeye Parser Generator - version 0.8.0
# www.waxeye.org
# Generated from the following grammar (retrieved from
# https://github.rackspace.com/CloudMonitoring/ele/blob/master/grammar/alarm.waxeye)
# AlarmDsl <- Ws *SetStatement Ws *IfStatement Ws *Return
#
#
# MetricName <- :'metric[' ( ( :'"' Name :'"' ) | ( :"'" Name :"'" ) ) :']'
#
# Param <- Ws (MetricName | Number) Ws
# FunctionName <- +([a-zA-Z_])
#
# Metric <= FunctionName :'(' ( Param | Param *( :',' Ws Param ) ) :')'
# | MetricName
#
#
# Name <- +([a-zA-Z_-]
# | ' '
# | [0-9]
# | '.'
# | '/')
#
#
# InExp <= Metric Ws Rhs
#
# Rhs <- NOp Ws NValue
# | TOp Ws TValue
# | CTOp Ws String
#
# CondExp <= Ws (:'(' Ws InExp Ws :')'
# | InExp ) Ws
#
# Conj <- "&&"
# | "||"
#
# IfStatement <- :'if' Ws
# :'(' Ws CondExp
# *(Conj Ws CondExp)
# :')' Ws Block Ws
#
# Block <- :'{' Ws Return Ws :'}'
#
# # Number comparators
# NValue <- Number
# | Metric
#
# # Text metrics, not the same comparators
# TValue <- String
# | Metric
#
# String <- ( :'"'
# *( :'\\' ( Unicode | Escaped )
# | !'\\' !'"' . )
# :'"' )
# |
# ( :"'"
# *( :'\\' ( Unicode | Escaped )
# | !'\\' !"'" . )
# :"'" )
#
#
# Unicode <- 'u' [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]
#
# Escaped <- ["'/\\bfnrt]
#
# Number <- ?'-'
# ('0' | [1-9] *[0-9])
# ?('.' +[0-9])
# ?([eE] ?[+-] +[0-9])
#
# WholeNumber <- [1-9] *[0-9]
#
# AlarmStatusArgs <- AlertStates ?( Ws :',' Ws AlertStateReason )
#
# DeprecatedAlarmStatus <- AlarmStatusArgs
#
# AlarmStatus <- :'new' Ws :'AlarmStatus(' Ws AlarmStatusArgs Ws :')' Ws :';'
#
# Return <- :'return ' Ws ( AlarmStatus | DeprecatedAlarmStatus ) Ws
#
# AlertStates <- 'CRITICAL'
# | 'OK'
# | 'WARNING'
#
# AlertStateReason <- String
#
# # Number operators that take metric or constant numbers on both sides
# NOp <- ('>='
# | '<='
# | '>'
# | '<'
# | '=='
# | '!=') Ws
#
# # Text operators that take metric or constant strings on both sides
# TOp <- ('==' | '!=') Ws
#
# # Text operators that only take constant strings on the rhs
# CTOp <- ('nregex' | 'regex') Ws
#
#
# SetStatement <- :':set' Ws InSetStatement Ws
#
#
# InSetStatement <= SetConsistencyLevel
# | SetConsecutiveCount
# | SetDimensionFilter
#
#
# ConsistencyLevel <- 'ONE'
# | 'QUORUM'
# | 'ALL'
#
#
# # Should these just be on the alarm itself?
# # Like check type filtering...
# SetConsistencyLevel <- :'consistencyLevel' Ws :'=' Ws ConsistencyLevel
#
# SetConsecutiveCount <- :'consecutiveCount' Ws :'=' Ws WholeNumber
#
# # We just parse out a string here, then validate it later so we can provide an
# # actually useful error message.
# SetDimensionFilter <- :'dimensionFilter' Ws :'=' Ws String
#
#
# SComment <: '#' *(!EndOfLine .) (EndOfLine | !.)
#
#
# MComment <: '/*' *(MComment | !'*/' . ) '*/'
#
#
# EndOfLine <: '\r\n' | '\n' | '\r'
#
# Wsp <: *[ \t]
#
# Ws <: *(EndOfLine | SComment | MComment | [ \t])
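# A hedged illustration (not part of the generated parser): a small alarm
# body of the kind the grammar above accepts. The metric name 'code' and
# the reason string are made up for this example.
example_alarm = """
:set consecutiveCount = 3
if (metric['code'] != '200') {
    return new AlarmStatus(CRITICAL, 'unexpected response code');
}
return new AlarmStatus(OK);
"""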
"""
Newick format (:mod:`skbio.io.format.newick`)
=============================================
.. currentmodule:: skbio.io.format.newick
Newick format (``newick``) stores spanning-trees with weighted edges and node
names in a minimal file format [1]_. This is useful for representing
phylogenetic trees and taxonomies. Newick was created as an informal
specification on June 26, 1986 [2]_.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.tree.TreeNode` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
A Newick file represents a tree using the following grammar. See below for an
explanation of the format in plain English.
Formal Grammar
^^^^^^^^^^^^^^
.. code-block:: none
NEWICK ==> NODE ;
NODE ==> FORMATTING SUBTREE FORMATTING NODE_INFO FORMATTING
SUBTREE ==> ( CHILDREN ) | null
NODE_INFO ==> LABEL | LENGTH | LABEL FORMATTING LENGTH | null
FORMATTING ==> [ COMMENT_CHARS ] | whitespace | null
CHILDREN ==> NODE | CHILDREN , NODE
LABEL ==> ' ALL_CHARS ' | SAFE_CHARS
LENGTH ==> : FORMATTING NUMBER
COMMENT_CHARS ==> any
ALL_CHARS ==> any
SAFE_CHARS ==> any except: ,;:()[] and whitespace
NUMBER ==> a decimal or integer
.. note:: The ``_`` character inside of SAFE_CHARS will be converted to a
blank space in ``skbio.tree.TreeNode`` and vice versa.
``'`` is considered the escape character. To escape ``'`` use a
preceding ``'``.
The implementation of newick in scikit-bio allows nested comments. To
escape ``[`` or ``]`` from within COMMENT_CHARS, use a preceding ``'``.
Explanation
^^^^^^^^^^^
The Newick format defines a tree by creating a minimal representation of nodes
and their relationships to each other.
Basic Symbols
~~~~~~~~~~~~~
There are several symbols which define nodes, the first of which is the
semi-colon (``;``). The semi-colon creates a root node to its left. Recall that
there can only be one root in a tree.
The next symbol is the comma (``,``), which creates a node to its right.
However, these two alone are not enough. For example imagine the following
string: ``, , , ;``. It is evident that there is a root, but the other 3 nodes,
defined by commas, have no relationship. For this reason, it is not a valid
Newick string to have more than one node at the root level.
To provide these relationships, there is another structure:
paired parenthesis (``( )``). These are inserted at the location of an existing
node and give it the ability to have children. Placing ``( )`` in a node's
location will create a child inside the parenthesis on the left-most
inner edge.
Application of Rules
~~~~~~~~~~~~~~~~~~~~
Adding a comma within the parenthesis will create two children: ``( , )``
(also known as a bifurcating node). Notice that only one comma is needed
because the parenthesis have already created a child. Adding more commas will
create more children who are siblings to each other. For example, writing
``( , , , )`` will create a multifurcating node with 4 child nodes who are
siblings to each other.
The notation for a root can be used to create a complete tree. The ``;`` will
create a root node where parenthesis can be placed: ``( );``. Adding commas
will create more children: ``( , );``. These rules can be applied recursively
ad infinitum: ``(( , ), ( , ));``.
Adding Node Information
~~~~~~~~~~~~~~~~~~~~~~~
Information about a node can be added to improve the clarity and meaning of a
tree. Each node may have a label and/or a length (to the parent). Newick always
places the node information at the right-most edge of a node's position.
Starting with labels, ``(( , ), ( , ));`` would become
``((D, E)B, (F, G)C)A;``. There is a named root ``A`` and the root's children
(from left to right) are ``B`` and ``C``. ``B`` has the children ``D`` and
``E``, and ``C`` has the children ``F`` and ``G``.
Length represents the distance (or weight of the edge) that connects a node to
its parent. This must be a decimal or integer. As an example, suppose ``D`` is
rather estranged from ``B``, and ``E`` is very close. That can be written as:
``((D:10, E:0.5)B, (F, G)C)A;``. Notice that the colon (``:``) separates the
label from the length. If the length is provided but the label is omitted, a
colon must still precede the length (``(:0.25,:0.5):0.0;``). Without this, the
length would be interpreted as a label (which happens to be a number).
.. note:: Internally scikit-bio will cast a length to ``float`` which
   technically means that even exponent strings (``1e-3``) are supported.
Advanced Label and Length Rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
More characters can be used to create more descriptive labels. When creating a
label there are some rules that must be considered due to limitations in the
Newick format. The following characters are not allowed within a standard
label: parenthesis, commas, square-brackets, colon, semi-colon, and whitespace.
These characters are also disallowed from occurring within a length, which has
a much stricter format: decimal or integer. Many of these characters are
symbols which define the structure of a Newick tree and are thus disallowed for
obvious reasons. The symbols not yet mentioned are square-brackets (``[ ]``)
and whitespace (space, tab, and newline).
What if these characters are needed within a label? In the simple case of
spaces, an underscore (``_``) will be translated as a space on read and vice
versa on write.
What if a literal underscore or any of the others mentioned are needed?
A label can be escaped (meaning that its contents are understood as regular
text) using single-quotes (``'``). When a label is surrounded by single-quotes,
any character is permissible. If a single-quote is needed inside of an escaped
label or anywhere else, it can be escaped with another single-quote.
For example, ``A_1`` is written ``'A_1'`` and ``'A'_1`` would be ``'''A''_1'``.
Inline Comments
~~~~~~~~~~~~~~~
Square-brackets define a comment, which are the least commonly used part of
the Newick format. Comments are not included in the generated objects and exist
only as human readable text ignored by the parser. The implementation in
scikit-bio allows for nested comments (``[comment [nested]]``). Unpaired
square-brackets can be escaped with a single-quote preceding the bracket when
inside an existing comment. (This is identical to escaping a single-quote).
The single-quote has the highest operator precedence, so there is no need to
worry about starting a comment from within a properly escaped label.
Whitespace
~~~~~~~~~~
Whitespace is not allowed within any un-escaped label or in any length, but it
is permitted anywhere else.
Caveats
~~~~~~~
Newick cannot always provide a unique representation of any tree, in other
words, the same tree can be written multiple ways. For example: ``(A, B);`` is
isomorphic to ``(B, A);``. The implementation in scikit-bio maintains the given
sibling order in its object representations.
Newick has no representation of an unrooted tree. Some biological packages make
the assumption that when a trifurcated root exists in an otherwise bifurcated
tree that the tree must be unrooted. In scikit-bio, ``skbio.tree.TreeNode``
will always be rooted at the ``newick`` root (``;``).
Format Parameters
-----------------
The only supported format parameter is `convert_underscores`. This is `True` by
default. When `False`, underscores found in unescaped labels will not be
converted to spaces. This is useful when reading the output of an external
program in which the underscores were not escaped. This parameter only affects
`read` operations. It does not exist for `write` operations; they will always
properly escape underscores.
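As an illustrative sketch (the labels here are arbitrary, and the final
doctest line only demonstrates the effect of the parameter):
>>> from io import StringIO
>>> from skbio import read
>>> from skbio.tree import TreeNode
>>> f = StringIO("(a_1,b_2)c;")
>>> tree = read(f, format="newick", into=TreeNode,
...             convert_underscores=False)
>>> f.close()
>>> tree.children[0].name
'a_1'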
Examples
--------
This is a simple Newick string.
>>> from io import StringIO
>>> from skbio import read
>>> from skbio.tree import TreeNode
>>> f = StringIO("((D, E)B, (F, G)C)A;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
                    /-D
          /B-------|
         |          \-E
-A-------|
         |          /-F
          \C-------|
                    \-G
This is a complex Newick string.
>>> f = StringIO("[example](a:0.1, 'b_b''':0.2, (c:0.3, d_d:0.4)e:0.5)f:0.0;")
>>> tree = read(f, format="newick", into=TreeNode)
>>> f.close()
>>> print(tree.ascii_art())
          /-a
         |
-f-------|--b_b'
         |
         |          /-c
          \e-------|
                    \-d d
Notice that the node originally labeled ``d_d`` became ``d d``. Additionally
``'b_b'''`` became ``b_b'``. Note that the underscore was preserved in `b_b'`.
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip/newick_doc.html
.. [2] http://evolution.genetics.washington.edu/phylip/newicktree.html
""" |
"""
=============================
Byteswapping and byte order
=============================
Introduction to byte ordering and ndarrays
==========================================
The ``ndarray`` is an object that provides a Python array interface to data
in memory.
It often happens that the memory that you want to view with an array is
not of the same byte ordering as the computer on which you are running
Python.
For example, I might be working on a computer with a little-endian CPU -
such as an Intel Pentium, but I have loaded some data from a file
written by a computer that is big-endian. Let's say I have loaded 4
bytes from a file written by a Sun (big-endian) computer. I know that
these 4 bytes represent two 16-bit integers. On a big-endian machine, a
two-byte integer is stored with the Most Significant Byte (MSB) first,
and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
#. MSB integer 1
#. LSB integer 1
#. MSB integer 2
#. LSB integer 2
Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
>>> big_end_str
'\\x00\\x01\\x03\\x02'
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
770
Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
example, if our data represented a single unsigned 4-byte little-endian
integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
Returning to our ``big_end_arr`` - in this case our underlying data is
big-endian (data endianness) and we've set the dtype to match (the dtype
is also big-endian). However, sometimes you need to flip these around.
.. warning::
Scalars currently do not include byte order information, so extracting
a scalar from an array will return an integer in native byte order.
Hence:
>>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
True
Changing byte ordering
======================
As you can imagine from the introduction, there are two ways you can
affect the relationship between the byte ordering of the array and the
underlying memory it is looking at:
* Change the byte-ordering information in the array dtype so that it
interprets the underlying data as being in a different byte order.
This is the role of ``arr.newbyteorder()``
* Change the byte-ordering of the underlying data, leaving the dtype
interpretation as it was. This is what ``arr.byteswap()`` does.
The common situations in which you need to change byte ordering are:
#. Your data and dtype endianess don't match, and you want to change
the dtype so that it matches the data.
#. Your data and dtype endianess don't match, and you want to swap the
data so that they match the dtype
#. Your data and dtype endianess match, but you want the data swapped
and the dtype to reflect this
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------
We make something where they don't match:
>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256
The obvious fix for this situation is to change the dtype so it gives
the correct endianness:
>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1
Note the array has not changed in memory:
>>> fixed_end_dtype_arr.tobytes() == big_end_str
True
Data and type endianness don't match, change data to match dtype
----------------------------------------------------------------
You might want to do this if you need the data in memory to be a certain
ordering. For example you might be writing the memory out to a file
that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1
Now the array *has* changed in memory:
>>> fixed_end_mem_arr.tobytes() == big_end_str
False
Data and dtype endianness match, swap data and dtype
----------------------------------------------------
You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense. In this case you just do both of
the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
An easier way of casting the data to a specific dtype and byte ordering
can be achieved with the ndarray astype method:
>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
""" |
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0,  1,  2,  3],
       [ 4,  5,  6,  7],
       [ 8,  9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
       [4]])
>>> x + y
array([[4, 5],
       [5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
...     def new_logging_func(*args, **kwargs):
...         print("Logging call with parameters:", args, kwargs)
...         return f(*args, **kwargs)
...
...     return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
...     return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
...     wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
       [3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
Array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
        [3, 4]])
>>> x**2
matrix([[ 7, 10],
        [15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An `ndarray`_ with `structured data type`_ which has been subclassed as
np.recarray and whose dtype is of type np.record, making the
fields of its data type accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New NumPy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
...     color = 'blue'
...
...     def paint(self):
...         print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
       [3, 4]])
>>> x[:, 1]
array([2, 4])
structured data type
A data type composed of other datatypes
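For illustration (a minimal sketch; the field names ``x`` and ``y`` are
arbitrary)::
>>> dt = np.dtype([('x', int), ('y', float)])
>>> dt.names
('x', 'y')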
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
...     return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
""" |
# -*- coding: utf-8 -*-
# This file is part of ranger, the console file manager.
# This configuration file is licensed under the same terms as ranger.
# ===================================================================
#
# NOTE: If you copied this file to ~/.config/ranger/commands_full.py,
# then it will NOT be loaded by ranger, and only serve as a reference.
#
# ===================================================================
# This file contains ranger's commands.
# It's all in python; lines beginning with # are comments.
#
# Note that additional commands are automatically generated from the methods
# of the class ranger.core.actions.Actions.
#
# You can customize commands in the file ~/.config/ranger/commands.py.
# It has the same syntax as this file. In fact, you can just copy this
# file there with `ranger --copy-config=commands' and make your modifications.
# But make sure you update your configs when you update ranger.
#
# ===================================================================
# Every class defined here which is a subclass of `Command' will be used as a
# command in ranger. Several methods are defined to interface with ranger:
# execute(): called when the command is executed.
# cancel(): called when closing the console.
# tab(tabnum): called when <TAB> is pressed.
# quick(): called after each keypress.
#
# tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
#
# The return values for tab() can be either:
# None: There is no tab completion
# A string: Change the console to this string
# A list/tuple/generator: cycle through every item in it
#
# The return value for quick() can be:
# False: Nothing happens
# True: Execute the command afterwards
#
# The return value for execute() and cancel() doesn't matter.
#
# ===================================================================
# Commands have certain attributes and methods that facilitate parsing of
# the arguments:
#
# self.line: The whole line that was written in the console.
# self.args: A list of all (space-separated) arguments to the command.
# self.quantifier: If this command was mapped to the key "X" and
# the user pressed 6X, self.quantifier will be 6.
# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
# self.rest(n): The n-th argument plus everything that followed. For example,
# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
# self.start(n): Anything before the n-th argument. For example, if the
# command was "search foo bar a b c", start(2) will be "search foo"
#
# ===================================================================
# And this is a little reference for common ranger functions and objects:
#
# self.fm: A reference to the "fm" object which contains most information
# about ranger.
# self.fm.notify(string): Print the given string on the screen.
# self.fm.notify(string, bad=True): Print the given string in RED.
# self.fm.reload_cwd(): Reload the current working directory.
# self.fm.thisdir: The current working directory. (A File object.)
# self.fm.thisfile: The current file. (A File object too.)
# self.fm.thistab.get_selection(): A list of all selected files.
# self.fm.execute_console(string): Execute the string as a ranger command.
# self.fm.open_console(string): Open the console with the given string
# already typed in for you.
# self.fm.move(direction): Moves the cursor in the given direction, which
# can be something like down=3, up=5, right=1, left=1, to=6, ...
#
# File objects (for example self.fm.thisfile) have these useful attributes and
# methods:
#
# tfile.path: The path to the file.
# tfile.basename: The base name only.
# tfile.load_content(): Force a loading of the directories content (which
# obviously works with directories only)
# tfile.is_directory: True/False depending on whether it's a directory.
#
# For advanced commands it is unavoidable to dive a bit into the source code
# of ranger.
# ===================================================================
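# A minimal sketch of a custom command (not part of ranger itself), using
# only the interface described above; the command name "hello" is made up.
from ranger.api.commands import Command

class hello(Command):
    """:hello [name]

    Print a greeting in the status bar.
    """

    def execute(self):
        # self.arg(1) is the first argument, or '' if it doesn't exist.
        name = self.arg(1) or 'world'
        self.fm.notify("Hello, %s!" % name)

    def tab(self, tabnum):
        # Returning a string replaces the console contents.
        return self.start(1) + 'world'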
# -*- encoding: utf-8 -*-
# back ported from CPython 3
# A. HISTORY OF THE SOFTWARE
# ==========================
#
# Python was created in the early 1990s by NAME at Stichting
# Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
# as a successor of a language called ABC. NAME remains Python's
# principal author, although it includes many contributions from others.
#
# In 1995, NAME continued his work on Python at the Corporation for
# National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
# in Reston, Virginia where he released several versions of the
# software.
#
# In May 2000, NAME and the Python core development team moved to
# BeOpen.com to form the BeOpen PythonLabs team. In October of the same
# year, the PythonLabs team moved to Digital Creations (now Zope
# Corporation, see http://www.zope.com). In 2001, the Python Software
# Foundation (PSF, see http://www.python.org/psf/) was formed, a
# non-profit organization created specifically to own Python-related
# Intellectual Property. Zope Corporation is a sponsoring member of
# the PSF.
#
# All Python releases are Open Source (see http://www.opensource.org for
# the Open Source Definition). Historically, most, but not all, Python
# releases have also been GPL-compatible; the table below summarizes
# the various releases.
#
#     Release         Derived     Year        Owner       GPL-
#                     from                                compatible? (1)
#
#     0.9.0 thru 1.2              1991-1995   CWI         yes
#     1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
#     1.6             1.5.2       2000        CNRI        no
#     2.0             1.6         2000        BeOpen.com  no
#     1.6.1           1.6         2001        CNRI        yes (2)
#     2.1             2.0+1.6.1   2001        PSF         no
#     2.0.1           2.0+1.6.1   2001        PSF         yes
#     2.1.1           2.1+2.0.1   2001        PSF         yes
#     2.2             2.1.1       2001        PSF         yes
#     2.1.2           2.1.1       2002        PSF         yes
#     2.1.3           2.1.2       2002        PSF         yes
#     2.2.1           2.2         2002        PSF         yes
#     2.2.2           2.2.1       2002        PSF         yes
#     2.2.3           2.2.2       2003        PSF         yes
#     2.3             2.2.2       2002-2003   PSF         yes
#     2.3.1           2.3         2002-2003   PSF         yes
#     2.3.2           2.3.1       2002-2003   PSF         yes
#     2.3.3           2.3.2       2002-2003   PSF         yes
#     2.3.4           2.3.3       2004        PSF         yes
#     2.3.5           2.3.4       2005        PSF         yes
#     2.4             2.3         2004        PSF         yes
#     2.4.1           2.4         2005        PSF         yes
#     2.4.2           2.4.1       2005        PSF         yes
#     2.4.3           2.4.2       2006        PSF         yes
#     2.4.4           2.4.3       2006        PSF         yes
#     2.5             2.4         2006        PSF         yes
#     2.5.1           2.5         2007        PSF         yes
#     2.5.2           2.5.1       2008        PSF         yes
#     2.5.3           2.5.2       2008        PSF         yes
#     2.6             2.5         2008        PSF         yes
#     2.6.1           2.6         2008        PSF         yes
#     2.6.2           2.6.1       2009        PSF         yes
#     2.6.3           2.6.2       2009        PSF         yes
#     2.6.4           2.6.3       2009        PSF         yes
#     2.6.5           2.6.4       2010        PSF         yes
#     2.7             2.6         2010        PSF         yes
#
# Footnotes:
#
# (1) GPL-compatible doesn't mean that we're distributing Python under
# the GPL. All Python licenses, unlike the GPL, let you distribute
# a modified version without making your changes open source. The
# GPL-compatible licenses make it possible to combine Python with
# other software that is released under the GPL; the others don't.
#
# (2) According to NAME 1.6.1 is not GPL-compatible,
# because its license has a choice of law clause. According to
# CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
# is "not incompatible" with the GPL.
#
# Thanks to the many outside volunteers who have worked under NAME's
# direction to make these releases possible.
#
#
# B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
# ===============================================================
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained
# in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
# -------------------------------------------
#
# BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
#
# 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
# office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
# Individual or Organization ("Licensee") accessing and otherwise using
# this software in source or binary form and its associated
# documentation ("the Software").
#
# 2. Subject to the terms and conditions of this BeOpen Python License
# Agreement, BeOpen hereby grants Licensee a non-exclusive,
# royalty-free, world-wide license to reproduce, analyze, test, perform
# and/or display publicly, prepare derivative works, distribute, and
# otherwise use the Software alone or in any derivative version,
# provided, however, that the BeOpen Python License is retained in the
# Software, alone or in any derivative version prepared by Licensee.
#
# 3. BeOpen is making the Software available to Licensee on an "AS IS"
# basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
# SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
# AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
# DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 5. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 6. This License Agreement shall be governed by and interpreted in all
# respects by the law of the State of California, excluding conflict of
# law provisions. Nothing in this License Agreement shall be deemed to
# create any relationship of agency, partnership, or joint venture
# between BeOpen and Licensee. This License Agreement does not grant
# permission to use BeOpen trademarks or trade names in a trademark
# sense to endorse or promote products or services of Licensee, or any
# third party. As an exception, the "BeOpen Python" logos available at
# http://www.pythonlabs.com/logos.html may be used according to the
# permissions granted on that web page.
#
# 7. By copying, installing or otherwise using the software, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
#
# CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
# ---------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Corporation for National
# Research Initiatives, having an office at 1895 Preston White Drive,
# Reston, VA 20191 ("CNRI"), and the Individual or Organization
# ("Licensee") accessing and otherwise using Python 1.6.1 software in
# source or binary form and its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, CNRI
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python 1.6.1
# alone or in any derivative version, provided, however, that CNRI's
# License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
# 1995-2001 Corporation for National Research Initiatives; All Rights
# Reserved" are retained in Python 1.6.1 alone or in any derivative
# version prepared by Licensee. Alternately, in lieu of CNRI's License
# Agreement, Licensee may substitute the following text (omitting the
# quotes): "Python 1.6.1 is made available subject to the terms and
# conditions in CNRI's License Agreement. This Agreement together with
# Python 1.6.1 may be located on the Internet using the following
# unique, persistent identifier (known as a handle): 1895.22/1013. This
# Agreement may also be obtained from a proxy server on the Internet
# using the following URL: http://hdl.handle.net/1895.22/1013".
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python 1.6.1 or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python 1.6.1.
#
# 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
# basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. This License Agreement shall be governed by the federal
# intellectual property law of the United States, including without
# limitation the federal copyright law, and, to the extent such
# U.S. federal law does not apply, by the law of the Commonwealth of
# Virginia, excluding Virginia's conflict of law provisions.
# Notwithstanding the foregoing, with regard to derivative works based
# on Python 1.6.1 that incorporate non-separable material that was
# previously distributed under the GNU General Public License (GPL), the
# law of the Commonwealth of Virginia shall govern this License
# Agreement only as to issues arising under or with respect to
# Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
# License Agreement shall be deemed to create any relationship of
# agency, partnership, or joint venture between CNRI and Licensee. This
# License Agreement does not grant permission to use CNRI trademarks or
# trade name in a trademark sense to endorse or promote products or
# services of Licensee, or any third party.
#
# 8. By clicking on the "ACCEPT" button where indicated, or by copying,
# installing or otherwise using Python 1.6.1, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
#
# ACCEPT
#
#
# CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
# --------------------------------------------------
#
# Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
# The Netherlands. All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Stichting Mathematisch
# Centrum or CWI not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
"""
========
Glossary
========
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
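For example, ``np.asarray`` accepts any array_like input::
>>> np.asarray([(1, 2), [3, 4]])
array([[1, 2],
[3, 4]])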
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
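NumPy can produce this layout via the ``order`` argument of many
functions, for example::
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> x.flatten(order='F')
array([1, 4, 2, 5, 3, 6])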
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
... print "Logging call with parameters:", args, kwargs
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
immutable
An object that cannot be modified once it has been created is called
immutable. Two common examples are strings and tuples.
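Attempting to modify an immutable object raises an error::
>>> t = (1, 2)
>>> t[0] = 3
Traceback (most recent call last):
...
TypeError: 'tuple' object does not support item assignment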
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print "Key %d: %s" % (n, k)
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each of which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
An array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
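For example::
>>> a = [1, 2]
>>> b = a  # b now refers to the same list as a
>>> a is b
True
>>> b.append(3)  # changes made through b are visible through a
>>> a
[1, 2, 3]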
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New Numpy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
... color = 'blue'
...
... def paint(self):
... print "Painting the city %s!" % self.color
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each of which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
tuple
A sequence that may contain a variable number of objects of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
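Ufuncs broadcast their arguments and loop over the elements
automatically::
>>> np.add([1, 2, 3], 1)
array([2, 3, 4])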
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high- and low-level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
""" |
"""
This module contains generic generator functions for traversing tree
(and DAG) structures. It is agnostic to the underlying data structure
and implementation of the tree object. It does this through dependency
injection of the tree's accessor functions: get_parents and
get_children.
The following depth-first traversal methods are implemented:
* Pre-order: Parent yielded before children; child with multiple
parents is yielded when first encountered.
Example use cases (when DAGs are *not* supported):
1. User access. If computing a user's access to a node relies
on the user's access to the node's parents, access to the
parent has to be computed before access to the child can
be determined. To support access chains, a user's access on
a node is actually an accumulation of accesses down from the
root node through the ancestor chain to the actual node.
2. Field value percolated down. If a value for a field is
dependent on a combination of the child's and the parent's
value, the parent's value should be computed before that of
the child's. Similar to "User access", the value would be
percolated down through the entire ancestor chain.
Example: Start Date is
max(node's start date, start date of each ancestor)
This takes the most restrictive value.
3. Depth. When computing the depth of a tree, since a child's
depth value is 1 + the parent's depth value, the parent's
value should be computed before the child's.
4. Fast Subtree Deletion. If the tree is to be pruned during
traversal, an entire subtree can be deleted, without
traversing the children, as soon as the parent is determined
to be deleted.
* Topological: Parent yielded before children; child with multiple
parents yielded only after all its parents are visited.
Example use cases (when DAGs *are* supported):
1. User access. Similar to pre-order, except a user's access
is now determined by taking a *union* of the percolated
access value from each of the node's parents combined with
its own access.
2. Field value percolated down. Similar to pre-order, except the
value for a node is calculated from the array of
percolated values from each of its parents combined
with its own.
Example: Start Date is
max(node's start date, min(max(ancestry of each parent)))
This takes the most permissive from all ancestry chains.
3. Depth. Similar to pre-order, except the depth of a node will
be 1 + the minimum (or the maximum depending on semantics)
of the depth of all its parents.
4. Deletion. Deletion of subtrees is not as fast as it is
for pre-order since a node can be accessed through multiple
parents.
* Post-order: Children yielded before its parents.
Example use cases:
1. Counting. When each node wants to count the number of nodes
within its sub-structure, the count for each child has to be
calculated before its parents, since a parent's value
depends on its children.
2. Map function (when order doesn't matter). If a function
needs to be evaluated for each node in a DAG and the order
that the nodes are iterated doesn't matter, then use
post-order since it is faster than topological for DAGs.
3. Field value percolated up. If a value for a field is based
on the value from its children, the children's values need
to be computed before their parents.
Example: Minimum Due Date of all nodes within the
sub-structure.
Note: In-order traversal is not implemented as of yet. We can do so
if/when needed.
Optimizations possible once DAGs are no longer supported:
Supporting Directed Acyclic Graphs (DAGs) requires us to use
topological sort, which has the following negative performance
implications:
* For a simple tree, we can immediately skip over traversing
descendants, once it is determined that a parent is not to be yielded
(based on the return value from the 'filter_func' function). However,
since we support DAGs, we cannot simply skip over descendants since
they may still be accessible through a different ancestry chain and
need to be revisited once all their parents are visited.
* For topological sort, we need the get_parents accessor function in
order to determine whether all of a node's parents have been visited.
This means the underlying implementation of the graph needs to have
an efficient way to get a node's parents, perhaps with back pointers
to each node's parents. This requires additional storage space, which
could be eliminated if DAGs are not supported.
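As a concrete illustration, a pre-order traversal in this
dependency-injected style could look like the sketch below (the
function name and signature are illustrative, not this module's
actual API):

    def traverse_pre_order(root, get_children, filter_func=lambda _: True):
        # Iterative depth-first traversal: yields each node at most
        # once, parents before children, and skips the entire subtree
        # under any node rejected by filter_func.
        visited = set()
        stack = [root]
        while stack:
            node = stack.pop()
            if node in visited or not filter_func(node):
                continue
            visited.add(node)
            yield node
            # Reverse so children are yielded in their natural order.
            stack.extend(reversed(list(get_children(node))))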
""" |
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are noted.
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
>>> print scipy.special.bdtr(-1,10,0.3)
>>> scipy.special.errprint(1)
>>> print scipy.special.bdtr(-1,10,0.3)
.. autosummary::
:toctree: generated/
errprint
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
itairy --
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of real-valued order and complex argument.
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
besselpoly -- Integral of a Bessel function: Jv(2*a*x) * x**lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
bdtrik --
bdtrin --
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
btdtria --
btdtrib --
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria -- Inverse with respect to `a` of gdtr.
gdtrib -- Inverse with respect to `b` of gdtr.
gdtrix -- Inverse with respect to `x` of gdtr.
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
nbdtrik --
nbdtrin --
ncfdtr -- CDF of non-central F distribution.
ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.
ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.
ncfdtri -- Inverse CDF of noncentral F distribution.
ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.
nctdtr -- CDF of noncentral t distribution.
nctdtridf -- Find degrees of freedom of noncentral t distribution.
nctdtrit -- Inverse CDF of noncentral t distribution.
nctdtrinc -- Find noncentrality parameter of noncentral t distribution.
nrdtrimn -- Find mean of normal distribution from cdf and std.
nrdtrisd -- Find std of normal distribution from cdf and mean.
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
pdtrik --
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
chdtrc -- Integral from x to infinity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
chdtriv --
ndtr -- Integral from -infinity to x of standard normal pdf
log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
chndtr --
chndtridf --
chndtrinc --
chndtrix --
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + x.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- entr(x) = -x*log(x)
rel_entr -- rel_entr(x, y) = x*log(x/y)
kl_div -- kl_div(x, y) = x*log(x/y) - x + y
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log of the absolute value of the gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln -- Log of the multivariate gamma.
digamma -- Digamma function (derivative of the logarithm of gamma).
poch -- The Pochhammer symbol (rising factorial).
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1- erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind for real arguments.
lqmn -- [+]Associated Legendre Function of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic E
ellip_harm_2 -- Ellipsoidal harmonic F
ellip_normal -- Ellipsoidal normalization constant
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly to :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
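For example, a Chebyshev polynomial value can be computed stably as::
>>> from scipy.special import eval_chebyt
>>> eval_chebyt(3, 0.5)  # T_3(0.5) = 4*0.5**3 - 3*0.5
-1.0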
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Zeros of Kelvin function ber x
bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
ker_zeros -- [+]Zeros of Kelvin function ker x
kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]Combinations of N things taken k at a time, "N choose k"
perm -- [+]Permutations of N things taken k at a time, "k-permutations of N"
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-Geometric Mean
bernoulli -- Bernoulli numbers
binom -- Binomial coefficient.
diric -- Dirichlet function (periodic sinc)
euler -- Euler numbers
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
factorial -- The factorial function, n! = special.gamma(n+1)
factorial2 -- Double factorial, n!!
factorialk -- [+]Multifactorial of order k, n(!!...!) with k '!'s
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Dilogarithm integral.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
zetac -- Standard Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
xlogy -- x*log(y)
xlog1py -- x*log1p(y)
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
""" |
#
# ElementTree
# $Id: ElementTree.py 3224 2007-08-27 21:23:39Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
# 2006-11-18 fl added parser support for IronPython (ElementIron)
# 2007-08-27 fl fixed newlines in attributes
#
# Copyright (c) 1999-2007 by NAME. All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is local and global (<doctest test.test_syntax[0]>, line 1)
The tests all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions -- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name. As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
TODO(jhylton): "assignment to None" is inconsistent with other messages
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[1]>, line 1)
>>> None = 1
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[2]>, line 1)
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to () (<doctest test.test_syntax[3]>, line 1)
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call (<doctest test.test_syntax[4]>, line 1)
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call (<doctest test.test_syntax[5]>, line 1)
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator (<doctest test.test_syntax[6]>, line 1)
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression (<doctest test.test_syntax[7]>, line 1)
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[8]>, line 1)
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[9]>, line 1)
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: can't assign to repr (<doctest test.test_syntax[10]>, line 1)
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal (<doctest test.test_syntax[11]>, line 1)
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator (<doctest test.test_syntax[12]>, line 1)
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression (<doctest test.test_syntax[13]>, line 1)
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[14]>, line 1)
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument (<doctest test.test_syntax[15]>, line 1)
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[16]>, line 1)
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[17]>, line 1)
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[18]>, line 1)
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[19]>, line 1)
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument (<doctest test.test_syntax[23]>, line 1)
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments (<doctest test.test_syntax[25]>, line 1)
The actual error case counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments (<doctest test.test_syntax[26]>, line 1)
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment (<doctest test.test_syntax[27]>, line 1)
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[28]>, line 1)
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[29]>, line 1)
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression (<doctest test.test_syntax[30]>, line 1)
From ast_for_expr_stmt():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: augmented assignment to generator expression not possible (<doctest test.test_syntax[31]>, line 1)
>>> None += 1
Traceback (most recent call last):
SyntaxError: assignment to None (<doctest test.test_syntax[32]>, line 1)
>>> f() += 1
Traceback (most recent call last):
SyntaxError: illegal expression for augmented assignment (<doctest test.test_syntax[33]>, line 1)
Test continue in finally in weird combinations.
continue in for loop under finally should be ok.
>>> def test():
...     try:
...         pass
...     finally:
...         for abc in range(10):
...             continue
...         print abc
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
...     for abc in range(10):
...         try:
...             pass
...         finally:
...             continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[36]>, line 6)
This is essentially a continue in a finally which should not be allowed.
>>> def test():
...     for abc in range(10):
...         try:
...             pass
...         finally:
...             try:
...                 continue
...             except:
...                 pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[37]>, line 7)
>>> def foo():
...     try:
...         pass
...     finally:
...         continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[38]>, line 5)
>>> def foo():
...     for a in ():
...         try:
...             pass
...         finally:
...             continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[39]>, line 6)
>>> def foo():
...     for a in ():
...         try:
...             pass
...         finally:
...             try:
...                 continue
...             finally:
...                 pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[40]>, line 7)
>>> def foo():
...     for a in ():
...         try: pass
...         finally:
...             try:
...                 pass
...             except:
...                 continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause (<doctest test.test_syntax[41]>, line 8)
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
...     print 1
...     break
...     print 2
... finally:
...     print 3
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop (<doctest test.test_syntax[42]>, line 3)
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
...  while 2:
...   while 3:
...    while 4:
...     while 5:
...      while 6:
...       while 8:
...        while 9:
...         while 10:
...          while 11:
...           while 12:
...            while 13:
...             while 14:
...              while 15:
...               while 16:
...                while 17:
...                 while 18:
...                  while 19:
...                   while 20:
...                    while 21:
...                     while 22:
...                      break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
...     x() = 1
... elif 1:
...     pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[44]>, line 2)
>>> if 1:
...     pass
... elif 1:
...     x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[45]>, line 4)
>>> if 1:
...     x() = 1
... elif 1:
...     pass
... else:
...     pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[46]>, line 2)
>>> if 1:
...     pass
... elif 1:
...     x() = 1
... else:
...     pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[47]>, line 4)
>>> if 1:
...     pass
... elif 1:
...     pass
... else:
...     x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call (<doctest test.test_syntax[48]>, line 6)
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated (<doctest test.test_syntax[49]>, line 1)
""" |
""" The core class bits of TileStache.
Two important classes can be found here.
Layer represents a set of tiles in TileStache. It keeps references to
providers, projections, a Configuration instance, and other details required
for the storage and rendering of a tile set. Layers are represented in the
configuration file as a dictionary:
{
  "cache": ...,
  "layers":
  {
    "example-name":
    {
      "provider": { ... },
      "metatile": { ... },
      "preview": { ... },
      "projection": ...,
      "stale lock timeout": ...,
      "cache lifespan": ...,
      "write cache": ...,
      "bounds": { ... },
      "allowed origin": ...,
      "maximum cache age": ...,
      "redirects": ...,
      "tile height": ...,
      "jpeg options": ...,
      "png options": ...
    }
  }
}
- "provider" refers to a Provider, explained in detail in TileStache.Providers.
- "metatile" optionally makes it possible for multiple individual tiles to be
rendered at one time, for greater speed and efficiency. This is commonly used
for the Mapnik provider. See below for more information on metatiles.
- "preview" optionally overrides the starting point for the built-in per-layer
slippy map preview, useful for image-based layers where appropriate.
See below for more information on the preview.
- "projection" names a geographic projection, explained in TileStache.Geography.
If omitted, defaults to spherical mercator.
- "stale lock timeout" is an optional number of seconds to wait before forcing
a lock that might be stuck. This is defined on a per-layer basis, rather than
for an entire cache at one time, because you may have different expectations
for the rendering speeds of different layer configurations. Defaults to 15.
- "cache lifespan" is an optional number of seconds that cached tiles should
be stored. This is defined on a per-layer basis. Defaults to forever if None,
0 or omitted.
- "write cache" is an optional boolean value to allow skipping cache write
altogether. This is defined on a per-layer basis. Defaults to true if omitted.
- "bounds" is an optional dictionary of six tile boundaries to limit the
rendered area: low (lowest zoom level), high (highest zoom level), north,
west, south, and east (all in degrees).
- "allowed origin" is an optional string that shows up in the response HTTP
header Access-Control-Allow-Origin, useful for when you need to provide
javascript direct access to response data such as GeoJSON or pixel values.
The header is part of a W3C working draft (http://www.w3.org/TR/cors/).
- "maximum cache age" is an optional number of seconds used to control behavior
of downstream caches. Causes TileStache responses to include Cache-Control
and Expires HTTP response headers. Useful when TileStache is itself hosted
behind an HTTP cache such as Squid, Cloudfront, or Akamai.
- "redirects" is an optional dictionary of per-extension HTTP redirects,
treated as lowercase. Useful in cases where your tile provider can support
many formats but you want to enforce limits to save on cache usage.
If a request is made for a tile with an extension in the dictionary keys,
a response can be generated that redirects the client to the same tile
with another extension.
- "tile height" gives the height of the image tile in pixels. You almost always
want to leave this at the default value of 256, but you can use a value of 512
to create double-size, double-resolution tiles for high-density phone screens.
- "jpeg options" is an optional dictionary of JPEG creation options, passed
through to PIL: http://effbot.org/imagingbook/format-jpeg.htm.
- "png options" is an optional dictionary of PNG creation options, passed
through to PIL: http://effbot.org/imagingbook/format-png.htm.
- "pixel effect" is an optional dictionary that defines an effect to be applied
for all tiles of this layer. Pixel effect can be any of these: blackwhite,
greyscale, desaturate, pixelate, halftone, or blur.
The public-facing URL of a single tile for this layer might look like this:
http://example.com/tilestache.cgi/example-name/0/0/0.png
Sample JPEG creation options:
{
"quality": 90,
"progressive": true,
"optimize": true
}
Sample PNG creation options:
{
"optimize": true,
"palette": "filename.act"
}
Sample pixel effect:
{
"name": "desaturate",
"factor": 0.85
}
Sample bounds:
{
"low": 9, "high": 15,
"south": 37.749, "west": -122.358,
"north": 37.860, "east": -122.113
}
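Sample redirects (the "jpg"-to-"png" mapping shown is a hypothetical
illustration, not taken from a real configuration):
{
"jpg": "png"
}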
Metatile represents a larger area to be rendered at one time. Metatiles are
represented in the configuration file as a dictionary:
{
"rows": 4,
"columns": 4,
"buffer": 64
}
- "rows" and "columns" are the height and width of the metatile measured in
tiles. This example metatile is four rows tall and four columns wide, so it
will render sixteen tiles simultaneously.
- "buffer" is a buffer area around the metatile, measured in pixels. This is
useful for providers with labels or icons, where it's necessary to draw a
bit extra around the edges to ensure that text is not cut off. This example
metatile has a buffer of 64 pixels, so the resulting metatile will be 1152
pixels square: 4 rows x 256 pixels + 2 x 64 pixel buffer.
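As a quick sanity check of that arithmetic, a short sketch in Python (the
variable names are illustrative only, not part of TileStache):
>>> rows, tile_size, buf = 4, 256, 64
>>> rows * tile_size + 2 * buf
1152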
The preview can be accessed through a URL like /<layer name>/preview.html:
{
"lat": 33.9901,
"lon": -116.1637,
"zoom": 16,
"ext": "jpg"
}
- "lat" and "lon" are the starting latitude and longitude in degrees.
- "zoom" is the starting zoom level.
- "ext" is the filename extension, e.g. "png".
""" |
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM            |
+-----------------+
| <size>          |
+----+------------+
|    | AIFC       |
|    +------------+
|    | <chunks>   |
|    |    .       |
|    |    .       |
|    |    .       |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
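For illustration only, a minimal sketch of reading a chunk header and a
pstring with the struct module (this is not part of this module's API;
fp is assumed to be a binary file object positioned appropriately):

    import struct

    def read_chunk_header(fp):
        # 4-byte identifier followed by a big-endian 4-byte size
        ident = fp.read(4)
        size = struct.unpack('>L', fp.read(4))[0]
        return ident, size

    def read_pstring(fp):
        # 1 length byte, the string itself, then a pad byte if the
        # total length (length byte + data) would otherwise be odd
        length = ord(fp.read(1))
        data = fp.read(length)
        if (1 + length) % 2 == 1:
            fp.read(1)  # skip the pad byte
        return data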
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
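A minimal reading sketch using the methods above ('sound.aiff' is a
hypothetical file name):

    import aifc
    f = aifc.open('sound.aiff', 'r')
    print f.getnchannels(), f.getsampwidth(), f.getframerate()
    data = f.readframes(f.getnframes())
    f.close()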
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, including the compression
type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
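A minimal writing sketch following the call order described above
('out.aifc' is a hypothetical file name and 'frames' is assumed to hold
raw sample data of the configured width):

    import aifc
    g = aifc.open('out.aifc', 'w')
    g.setnchannels(1)
    g.setsampwidth(2)
    g.setframerate(44100)
    g.writeframesraw(frames)
    g.close()  # patches up the sizes in the header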
""" |
"""
Introduction
============
Persistent objects are simply the objects which automatically save their state
when they are destroyed and restore it when they are recreated, even during
another program invocation.
.. _persistent-overview:
Persistent Object Overview
==========================
Most often, persistent objects are, in fact, persistent windows as it is especially
convenient to automatically restore the UI state when the program is restarted but
an object of any class can be made persistent. Moreover, persistence is implemented
in a non-intrusive way so that the original object class doesn't need to be modified
at all in order to add support for saving and restoring its properties.
The persistence framework involves:
* **PersistenceManager** which all persistent objects register themselves with. This class
handles actual saving and restoring of persistent data as well as various global
aspects of persistence, e.g. it can be used to disable restoring the saved data;
* **PersistentObject** is the base class for all persistent objects or, rather, adaptors
for the persistent objects as this class's main purpose is to provide the bridge between
the original class -- which has no special persistence support -- and PersistenceManager;
* **PersistentHandlers** which handle different kinds of saving/restoring actions depending
on the widget kind.
Using Persistent Windows
========================
wxPython has built-in support for a (constantly growing) number of controls. Currently the
following classes are supported:
* wx.TopLevelWindow (and hence wx.Frame and wx.Dialog, together with their own AUI perspectives);
* wx.MenuBar, FlatMenuBar;
* AuiToolBar;
* wx.Notebook, wx.Toolbook, wx.Treebook, wx.Choicebook, wx.aui.AuiNotebook,
AuiNotebook (together with its own AUI perspective),
FlatNotebook, LabelBook,
FlatImageBook;
* wx.CheckBox;
* wx.ListBox, wx.VListBox, wx.HtmlListBox, wx.SimpleHtmlListBox, wx.gizmos.EditableListBox;
* wx.ListCtrl, wx.ListView, UltimateListCtrl;
* wx.CheckListBox;
* wx.Choice, wx.ComboBox, wx.combo.OwnerDrawnComboBox;
* wx.RadioBox;
* wx.RadioButton;
* wx.ScrolledWindow, wx.lib.scrolledpanel.ScrolledPanel;
* wx.Slider, KnobCtrl;
* wx.SpinButton, wx.SpinCtrl, FloatSpin;
* wx.SplitterWindow;
* wx.TextCtrl, wx.SearchCtrl, wx.lib.expando.ExpandoTextCtrl, wx.lib.masked.Ctrl;
* wx.ToggleButton, wx.lib.buttons.GenToggleButton, wx.lib.buttons.GenBitmapToggleButton,
wx.lib.buttons.GenBitmapTextToggleButton, SToggleButton,
SBitmapToggleButton, SBitmapTextToggleButton;
* wx.TreeCtrl, wx.GenericDirCtrl, CustomTreeCtrl;
* wx.gizmos.TreeListCtrl, HyperTreeList;
* wx.lib.calendar.CalendarCtrl;
* wx.CollapsiblePane, PyCollapsiblePane;
* wx.DatePickerCtrl, wx.GenericDatePickerCtrl;
* wx.media.MediaCtrl;
* wx.ColourPickerCtrl, wx.lib.colourselect.ColourSelect;
* wx.FilePickerCtrl, wx.DirPickerCtrl;
* wx.FontPickerCtrl;
* wx.FileHistory;
* wx.DirDialog, wx.FileDialog;
* wx.FindReplaceDialog;
* wx.FontDialog;
* wx.ColourDialog, CubeColourDialog;
* FoldPanelBar;
* wx.SingleChoiceDialog, wx.MultiChoiceDialog;
* wx.TextEntryDialog, wx.PasswordEntryDialog.
To automatically save and restore the properties of the windows of classes listed
above you need to:
* Set a unique name for the window using `SetName()`: this step is important as the
name is used in the configuration file and so must be unique among all windows of
the same class;
* Call `PersistenceManager.Register(window)` at any moment after creating the window
and then `PersistenceManager.Restore(window)` when the settings may be restored
(which can't be always done immediately, e.g. often the window needs to be populated
first). If settings can be restored immediately after the window creation, as is often
the case for wx.TopLevelWindow, for example, then `PersistenceManager.RegisterAndRestore(window)`
can be used to do both at once.
* If you want the settings for the window to be saved when your main frame is destroyed (or your app closes), simply call
`PersistenceManager.SaveAndUnregister(window)` with no arguments.
Usage
=====
Example of using a notebook control which automatically remembers the last open page::
import wx, os
import wx.lib.agw.persist as PM

class MyFrame(wx.Frame):

    def __init__(self, parent):

        wx.Frame.__init__(self, parent, -1, "Persistent Controls Demo")

        self.book = wx.Notebook(self, wx.ID_ANY)

        # Very important step!!
        self.book.SetName("MyBook") # Do not use the default name!!

        self.book.AddPage(wx.Panel(self.book), "Hello")
        self.book.AddPage(wx.Panel(self.book), "World")

        self.Bind(wx.EVT_CLOSE, self.OnClose)

        self._persistMgr = PM.PersistenceManager.Get()

        _configFile = os.path.join(os.getcwd(), self.book.GetName())
        self._persistMgr.SetPersistenceFile(_configFile)

        if not self._persistMgr.RegisterAndRestoreAll(self.book):
            # Nothing was restored, so choose the default page ourselves
            self.book.SetSelection(0)

    def OnClose(self, event):

        self._persistMgr.SaveAndUnregister(self.book)
        event.Skip()

# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
.. _persistent-windows:
Defining Custom Persistent Windows
==================================
User-defined classes can be easily integrated with PersistenceManager. To add support
for your custom class MyWidget you just need to:
* Define a MyWidgetHandler class inheriting from `AbstractHandler`;
* Implement its `GetKind()` method returning a unique string identifying all MyWidget
objects, typically something like "widget";
* Implement its `Save()` and `Restore()` methods to actually save and restore the widget
settings using `PersistentObject.SaveValue()` and `PersistentObject.RestoreValue()` methods.
If you want to add persistence support for a class not deriving from wx.Window, you need
to derive MyPersistentWidget directly from PersistentObject and so implement its
`PersistentObject.GetName()` method too. Additionally, you must ensure that
`PersistenceManager.SaveAndUnregister()` is called when your object is destroyed as this
can be only done automatically for windows.
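A minimal handler sketch along the lines of the three steps above (a sketch,
not library code: MyWidget and its GetValue()/SetValue() accessors plus the
saved "value" key are illustrative assumptions; the _window and _pObject
attributes follow the pattern of the built-in handlers and should be checked
against the installed version)::

    from wx.lib.agw.persist.persist_handlers import AbstractHandler

    class MyWidgetHandler(AbstractHandler):

        def __init__(self, pObject):
            AbstractHandler.__init__(self, pObject)

        def GetKind(self):
            # Unique string identifying all MyWidget objects
            return "widget"

        def Save(self):
            # Persist the (assumed) widget state under the "value" key
            self._pObject.SaveValue("value", self._window.GetValue())
            return True

        def Restore(self):
            value = self._pObject.RestoreValue("value")
            if value is not None:
                self._window.SetValue(value)
            return value is not None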
TODOs
=====
* Find a way to handle :class:`ToolBar` UI settings as it has been done for :class:`~lib.agw.aui.auibar.AuiToolBar`:
current :class:`ToolBar` doesn't seem to have easy access to the underlying toolbar tools;
* Implement handler(s) for :class:`grid.Grid` for row/columns sizes (possibly adding another style
to `PersistenceManager` as :class:`grid.Grid` sets up arrays to store individual row and column
sizes when non-default sizes are used. The memory requirements for this could become prohibitive
if the grid is very large);
* Find a way to properly save and restore dialog data (:class:`ColourDialog`, :class:`FontDialog` etc...);
* Add handlers for the remaining widgets not yet wrapped (mostly in :mod:`lib`).
License And Version
===================
`PersistentObjects` library is distributed under the wxPython license.
Latest revision: NAME @ 27 Mar 2013, 21.00 GMT
Version 0.4.
""" |
"""
PySOS version 1.51 - March 22 2007
Authors: NAME and NAME
This module allows you to connect to a SOS server on any host
and port, as well as send and receive SOS messages.
INITIALIZATION:
===============
Make sure you are running the SOS server, then create a new instance:
>>> import pysos
>>> srv = pysos.sossrv() # you may enter host=... and port=... here
>>> # localhost and 7915 are default
SENDING MESSAGES:
=================
There are 3 different ways to send messages. They aren't too
different, and it's up to your personal preference which one to use:
1ST METHOD:
-----------
>>> data = pysos.pack('<BBBB', 0, 1, 2, 3)
>>>
>>> srv.post(daddr = 5, saddr = 3, did = 128,
... sid = 128, type = 32, data = data)
Any of these can be omitted, in which case the defaults specified
with set_message_defaults() are utilized.
2ND METHOD:
-----------
This method is largely the same as the previous, but it separates
the message creation from the act of sending it:
>>> m = pysos.msg(daddr = 5, saddr = 3, did = 128,
... sid = 128, type = 32, data = data)
>>>
>>> srv.post_msg(m)
3RD METHOD:
-----------
If you prefer to use SOS's post_net syntax, you may do so like this:
>>> srv.post_net(128, 128, 32, 4, data, 0, 5)
In this case, saddr is the one specified with set_message_defaults(),
or 0xFFFE by default. This is because post_net does not let you specify
your saddr in SOS.
Also note that the "length" and "flags" parameters are ignored.
RECEIVING MESSAGES:
===================
There are 2 different methods you can use. The first one is
synchronous (blocking) and the 2nd asynchronous -- it allows you to
register listeners and then run a non-blocking method to start listening
for messages. You can use both of these methods with the same sossrv.
1ST METHOD (synchronous):
-------------------------
>>> msg = srv.listen(did = 128, sid = 128,
... daddr = 0x14, saddr = 0x16,
... type = 32, nreplies = 5, timeout = 3.0)
This method returns the first matching message. It returns the message as
a dictionary with keys 'did', 'sid', 'daddr', 'saddr', 'type', 'length',
and 'data'. To cast msg['data'] into a tuple, you may use the unpack()
method, as such:
>>> data = pysos.unpack('<LHB', msg['data'])
Where '<LHB' stands for a little endian ('<') struct composed of a
uint32 ('L'), uint16 ('H') and uint8 ('B'). For signed integers, use
lowercase letters instead. Of course, the string you feed into the unpack()
method depends on your particular data struct.
2ND METHOD (asynchronous):
--------------------------
For this method you register a trigger (i.e. listener). Then a thread
in the background will call a function of your choosing
when the trigger fires.
This is how you specify a trigger:
>>> srv.register_trigger(func, did = 128,
... sid = 128, daddr = 0x14,
... saddr = 0x16, type = 32)
Where you may omit any parameter (except func) to match all messages,
irrespective of that parameter's value. That is, None is a wildcard.
At any point, you may use the deregister_trigger() method to remove
triggers from the pysos instance. When deregistering a trigger, None
is once again used as wildcard.
RPC-STYLE COMMUNICATIONS:
=========================
You can also do an RPC-style call, which posts a message to the network
and returns the response message(s):
>>> replylist = srv.post_rpc_msg(m, rtype=36, nreplies=10, timeout=5.0)
The above command creates a message dictionary (through sossrv.msg_dict)
which is sent to all the nodes. We collect up to 10 replies with message
type 36 in the variable replylist. The replies are in a list of message
dicts. If 5 seconds elapse, we just return the messages obtained thus
far.
For those who do not wish to first create a message dict (the variable
called 'm' in the example above), there is the post_rpc() method:
>>> srv.post_rpc(did = 0x97, daddr = 13, type = 32,
... rsid = 0x97, rsaddr = 13, rtype = 40,
... timeout = 3, nreplies = 5)
MORE INFORMATION:
=================
Use each method's help function for more details.
""" |
"""The Python portion of the script that builds TUI
Usage:
% python setup.py [--quiet] py2app
History:
2004-02-20 NAME Specify libs in buildapp instead of as cmd-line args.
Stop forcing in the "os" module since that's no longer needed.
Use USE_ZIPIMPORT=False instead of unpacking Modules.zip.
2004-03-03 NAME Modified to use the new runtui.py as the main program.
2004-08-23 NAME Modified to save the source for built-in scripts.
2004-09-09 NAME Bug fix: was not including the tcl snack package.
2004-10-06 NAME Modified to include version info in the proper way.
Hence also modified to stop including it in the file name.
2004-11-19 NAME Modified to use current RO and TUI instead of one on the
PYTHONPATH, to avoid importing svn stuff.
2005-03-03 NAME Modified to import the new RO/Wdg/Resources.
2005-08-02 NAME Modified for the new TUI layout that allows the python code
to be zipped and separated from resources.
2005-09-22 NAME Added TUI/Scripts to the list of resources.
2006-01-21 NAME Renamed from buildtui.py to setup.py.
Modified to use py2app.
2006-02-24 NAME Modified to include matplotlib.
Added addDataFiles.
2006-03-08 NAME Modified to use new runtuiWithLog.py instead of runtui.py.
2006-05-25 NAME Added module FileDialog so the NICFPS:Focus script loads.
2006-06-01 NAME Corrected location of matplotlib data files.
2006-09-08 NAME Modified for py2app version 0.3.4 (which requires setuptools
and handles matplotlib automatically).
Added UniversalBinaryOK constant.
2006-12-01 NAME Changed UniversalBinaryOK to True, due to universal Aqua Tcl/Tk 8.4.14.
2006-12-28 NAME Changed UniversalBinaryOK back to False; Aqua Tcl/Tk 8.4.14 is buggy.
2007-01-16 NAME Added email.Utils to required modules (needed for Python 2.5).
2007-01-30 NAME Modified unused resource-adding code to support new RO layout.
2007-06-07 NAME Changed UniversalBinaryOK to True, due to universal Aqua Tcl/Tk 8.4.15.
2007-09-10 NAME Changed UniversalBinaryOK back to False due to the bugs in Aqua Tcl/Tk 8.4.15
(color picker broken and window geometry wrong on MacOS X 10.3.9)
2007-10-01 NAME Changed UniversalBinaryOK back to True, due to universal Aqua Tcl/Tk 8.4.16.
The color picker is fixed, but window geometry is still bad under MacOS X 10.3.9.
2007-11-08 NAME Changed UniversalBinaryOK back to False due to the bugs in Aqua Tcl/Tk 8.4.16
(nasty memory leak)
2007-12-20 NAME Bug fix: always built a universal binary on Intel Macs (regardless of UniversalBinaryOK).
2008-01-14 NAME Changed UniversalBinaryOK back to True. Aqua Tcl/Tk 8.4.14 does have the problem
of losing the mouse pointer, but that is workable with the improved guider control-click,
whereas 8.4.15-8.4.17 have a nasty memory leak and may be the last 8.4.x produced.
2008-01-29 NAME Modified to put tcl snack in a new location that is now supported by runtuiWithLog.py
and no longer requires that the Tcl/Tk Framework be installed.
Other tweaks to better support not including the Tcl/Tk Framework.
2009-10-22 NAME Removed installation of snack (now that TUI uses pygame to play sounds).
2009-11-09 NAME Modified to get application name from TUI.Version.
2010-07-02 NAME Removed email.Utils from required modules (it causes trouble for modern builds).
2011-08-11 NAME Removed obsolete LSPrefersPPC from property list.
Removed obsolete constant UniversalBinaryOK.
2014-02-17 NAME Added LSArchitecturePriority to PList to force 32-bit mode (for 10.9 compatibility).
Added LSMinimumSystemVersion to PList.
2014-10-17 NAME Back to 64-bit mode, now that we have a modern version of Tcl/Tk to try.
2015-11-10 NAME Added "FileDialog" back to inclModules; the current py2app requires it.
""" |
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
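For example, a short sketch of ``convolve`` (output shown in the legacy
numpy repr, which may differ slightly between versions): ::

    >>> from scipy import signal
    >>> signal.convolve([1, 2, 3], [0, 1, 0.5])
    array([ 0. ,  1. ,  2.5,  4. ,  1.5])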
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
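For example, a minimal sketch designing a 4th-order Butterworth lowpass
filter with a normalized cutoff frequency of 0.2 (the returned ``b`` and
``a`` are the transfer-function coefficients): ::

    >>> from scipy import signal
    >>> b, a = signal.butter(4, 0.2)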
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
ss2tf -- state-space to transfer function.
ss2zpk -- state-space to zero-pole-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact EMAIL). Dozens of software              *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of the license text as well.       *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact EMAIL with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, we are happy to help. As mentioned above, we also *
# * offer an alternative license to integrate Nmap into proprietary        *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email EMAIL for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the EMAIL mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
|
"""
=============================
Byteswapping and byte order
=============================
Introduction to byte ordering and ndarrays
==========================================
The ``ndarray`` is an object that provides a Python array interface to data
in memory.
It often happens that the memory that you want to view with an array is
not of the same byte ordering as the computer on which you are running
Python.
For example, I might be working on a computer with a little-endian CPU -
such as an Intel Pentium, but I have loaded some data from a file
written by a computer that is big-endian. Let's say I have loaded 4
bytes from a file written by a Sun (big-endian) computer. I know that
these 4 bytes represent two 16-bit integers. On a big-endian machine, a
two-byte integer is stored with the Most Significant Byte (MSB) first,
and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
#. MSB integer 1
#. LSB integer 1
#. MSB integer 2
#. LSB integer 2
Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
>>> big_end_str
'\\x00\\x01\\x03\\x02'
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
770
Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
example, if our data represented a single unsigned 4-byte little-endian
integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
Returning to our ``big_end_arr`` - in this case our underlying data is
big-endian (data endianness) and we've set the dtype to match (the dtype
is also big-endian). However, sometimes you need to flip these around.
.. warning::
Scalars currently do not include byte order information, so extracting
a scalar from an array will return an integer in native byte order.
Hence:
>>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
True
Changing byte ordering
======================
As you can imagine from the introduction, there are two ways you can
affect the relationship between the byte ordering of the array and the
underlying memory it is looking at:
* Change the byte-ordering information in the array dtype so that it
interprets the underlying data as being in a different byte order.
This is the role of ``arr.newbyteorder()``
* Change the byte-ordering of the underlying data, leaving the dtype
interpretation as it was. This is what ``arr.byteswap()`` does.
The common situations in which you need to change byte ordering are:
#. Your data and dtype endianess don't match, and you want to change
the dtype so that it matches the data.
#. Your data and dtype endianess don't match, and you want to swap the
data so that they match the dtype
#. Your data and dtype endianess match, but you want the data swapped
and the dtype to reflect this
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------
We make something where they don't match:
>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256
The obvious fix for this situation is to change the dtype so it gives
the correct endianness:
>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1
Note that the array has not changed in memory:
>>> fixed_end_dtype_arr.tobytes() == big_end_str
True
Data and type endianness don't match, change data to match dtype
----------------------------------------------------------------
You might want to do this if you need the data in memory to be a certain
ordering. For example you might be writing the memory out to a file
that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1
Now the array *has* changed in memory:
>>> fixed_end_mem_arr.tobytes() == big_end_str
False
Data and dtype endianness match, swap data and dtype
----------------------------------------------------
You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense. In this case you just do both of
the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
An easier way of casting the data to a specific dtype and byte ordering
can be achieved with the ndarray astype method:
>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
""" |
"""
===================
Universal Functions
===================
Ufuncs are, generally speaking, mathematical functions or operations that are
applied element-by-element to the contents of an array. That is, the result
in each output array element only depends on the value in the corresponding
input array (or arrays) and on no other array elements. Numpy comes with a
large suite of ufuncs, and scipy extends that suite substantially. The simplest
example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +).
Type coercion
=============
What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
two different types? What is the type of the result? Typically, the result is
the higher of the two types. For example: ::
float32 + float64 -> float64
int8 + int32 -> int32
int16 + float32 -> float32
float32 + complex64 -> complex64
There are some less obvious cases generally involving mixes of types
(e.g. uints, ints and floats) where equal bit sizes for each are not
capable of saving all the information in a different type of equivalent
bit size. Some examples are int32 vs float32 or uint32 vs int32.
Generally, the result is the higher type of larger size than both
(if available). So: ::
int32 + float32 -> float64
uint32 + int32 -> int64
Finally, the type coercion behavior when expressions involve Python
scalars is different than that seen for arrays. Since Python has a
limited number of types, combining a Python int with a dtype=np.int8
array does not coerce to the higher type but instead, the type of the
array prevails. So the rule for Python scalars combined with arrays is
that the result will be that of the array equivalent of the Python scalar
if the Python scalar is of a higher 'kind' than the array (e.g., float
vs. int); otherwise the resultant type will be that of the array.
For example: ::
Python int + int8 -> int8
Python float + int8 -> float64
ufunc methods
=============
Binary ufuncs support 4 methods.
**.reduce(arr)** applies the binary operator to elements of the array in
sequence. For example: ::
>>> np.add.reduce(np.arange(10)) # adds all elements of array
45
For multidimensional arrays, the first dimension is reduced by default: ::
>>> np.add.reduce(np.arange(10).reshape(2,5))
array([ 5, 7, 9, 11, 13])
The axis keyword can be used to specify different axes to reduce: ::
>>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
array([10, 35])
**.accumulate(arr)** applies the binary operator and generates an
equivalently shaped array that includes the accumulated amount for each
element of the array. A couple examples: ::
>>> np.add.accumulate(np.arange(10))
array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
>>> np.multiply.accumulate(np.arange(1,9))
array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
The behavior for multidimensional arrays is the same as for .reduce(),
as is the use of the axis keyword.
**.reduceat(arr,indices)** allows one to apply reduce to selected parts
of an array. It is a difficult method to understand; see the numpy
reference documentation for the full details.
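A small illustrative example (not from the original documentation; the
indices pick out the slices arr[0:5] and arr[5:]): ::

    >>> np.add.reduceat(np.arange(10), [0, 5])
    array([10, 35])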
**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
arr2. It will work on multidimensional arrays (the shape of the result is
the concatenation of the two input shapes): ::
>>> np.multiply.outer(np.arange(3),np.arange(4))
array([[0, 0, 0, 0],
       [0, 1, 2, 3],
       [0, 2, 4, 6]])
Output arguments
================
All ufuncs accept an optional output array. The array must be of the expected
output shape. Beware that if the type of the output array is of a different
(and lower) type than the output result, the results may be silently truncated
or otherwise corrupted in the downcast to the lower type. This usage is useful
when one wants to avoid creating large temporary arrays and instead allows one
to reuse the same array memory repeatedly (at the expense of not being able to
use more convenient operator notation in expressions). Note that when the
output argument is used, the ufunc still returns a reference to the result.
>>> x = np.arange(2)
>>> np.add(np.arange(2),np.arange(2.),x)
array([0, 2])
>>> x
array([0, 2])
and & or as ufuncs
==================
Invariably people try to use the Python 'and' and 'or' as logical
operators (and quite understandably). But these operators do not behave
as normal operators, since Python treats them quite differently. They
cannot be overloaded with array equivalents, so using 'and' or 'or'
with an array results in an error. There are two alternatives:
1) use the ufunc functions logical_and() and logical_or().
2) use the bitwise operators & and \\|. The drawback of these is that if
the arguments are not boolean arrays, the result is likely incorrect.
On the other hand, most usages of logical_and and logical_or are with
boolean arrays anyway. As long as one is careful, this is a convenient
way to apply these operators.
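For example (a short sketch of the boolean-array case, where the two
approaches agree): ::
>>> a = np.array([True, False, True])
>>> b = np.array([True, True, False])
>>> np.logical_and(a, b)
array([ True, False, False])
>>> a & b
array([ True, False, False])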
""" |
"""
Installed RPMs - Command ``rpm -qa``
====================================
The ``InstalledRpms`` class parses the output of the ``rpm -qa`` command. Each line
is parsed and stored in an ``InstalledRpm`` object. The ``rpm -qa`` command may
output data in different formats, and each format can be handled by the parsing
routines of this class. The basic output format is one package per line, as shown
in the Examples.
Sample input data::
a52dec-0.7.4-18.el7.nux.x86_64 Tue 14 Jul 2015 09:25:38 AEST 1398536494
aalib-libs-1.4.0-0.22.rc5.el7.x86_64 Tue 14 Jul 2015 09:25:40 AEST 1390535634
abrt-2.1.11-35.el7.x86_64 Wed 09 Nov 2016 14:52:01 AEDT 1446193355
...
kernel-3.10.0-230.el7synaptics.1186112.1186106.2.x86_64 Wed 20 May 2015 11:24:00 AEST 1425955944
kernel-3.10.0-267.el7.x86_64 Sat 24 Oct 2015 09:56:17 AEDT 1434466402
kernel-3.10.0-327.36.3.el7.x86_64 Wed 09 Nov 2016 14:53:25 AEDT 1476954923
kernel-headers-3.10.0-327.36.3.el7.x86_64 Wed 09 Nov 2016 14:20:59 AEDT 1476954923
kernel-tools-3.10.0-327.36.3.el7.x86_64 Wed 09 Nov 2016 15:09:42 AEDT 1476954923
kernel-tools-libs-3.10.0-327.36.3.el7.x86_64 Wed 09 Nov 2016 14:52:13 AEDT 1476954923
kexec-tools-2.0.7-38.el7_2.1.x86_64 Wed 09 Nov 2016 14:48:21 AEDT 1452845178
...
zlib-1.2.7-15.el7.x86_64 Wed 09 Nov 2016 14:21:19 AEDT 1431443476
zsh-5.0.2-14.el7_2.2.x86_64 Wed 09 Nov 2016 15:13:19 AEDT 1464185248
Examples:
>>> from insights.parsers.installed_rpms import InstalledRpms
>>> from insights.tests import context_wrap
>>> content = '''
... openjpeg-libs-1.3-9.el6_3.x86_64
... openldap-2.4.23-31.el6.x86_64
... openobex-1.4-7.el6.x86_64
... openssh-server-5.3p1-104.el6.x86_64
... openssh-askpass-5.3p1-84.1.el6.x86_64
... openssl-1.0.0-27.el6.x86_64
... '''
>>> shared = {InstalledRpms: InstalledRpms(context_wrap(content))}
>>> rpms = shared[InstalledRpms]
>>> 'openjpeg-libs' in rpms
True
>>> rpms.corrupt
False
>>> rpms.get_max('openjpeg-libs')
0:openjpeg-libs-1.3-9.el6_3
>>> type(rpms.get_max('openjpeg-libs'))
<class 'insights.parsers.installed_rpms.InstalledRpm'>
>>> rpms.get_min('openjpeg-libs')
0:openjpeg-libs-1.3-9.el6_3
>>> rpm = rpms.get_max('openssh-server')
>>> rpm
0:openssh-server-5.3p1-104.el6
>>> type(rpm)
<class 'insights.parsers.installed_rpms.InstalledRpm'>
>>> rpm.package
'openssh-server-5.3p1-104.el6'
>>> rpm.nvr
'openssh-server-5.3p1-104.el6'
>>> rpm.source
>>> rpm.name
'openssh-server'
>>> rpm.version
'5.3p1'
>>> rpm.release
'104.el6'
>>> rpm.arch
'x86_64'
>>> rpm.epoch
'0'
>>> from insights.parsers.installed_rpms import InstalledRpm
>>> rpm2 = InstalledRpm.from_package('openssh-server-6.0-100.el6.x86_64')
>>> rpm == rpm2
False
>>> rpm > rpm2
False
>>> rpm < rpm2
True
""" |
""" Provider that returns vector representation of features in a data source.
This is a provider that does not return an image, but rather queries
a data source for raw features and replies with a vector representation
such as GeoJSON. For example, it's possible to retrieve data for
locations of OpenStreetMap points of interest or street centerlines
contained within a tile's boundary.
Many Polymaps (http://polymaps.org) examples use GeoJSON vector data tiles,
which can be effectively created using this provider.
Vector functionality is provided by OGR (http://www.gdal.org/ogr/).
Thank you, NAME.
Currently, two serializations and three encodings are supported for a total
of six possible kinds of output with these tile name extensions:
GeoJSON (.geojson):
See http://geojson.org/geojson-spec.html
Arc GeoServices JSON (.arcjson):
See http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf
GeoBSON (.geobson) and Arc GeoServices BSON (.arcbson):
BSON-encoded GeoJSON and Arc JSON, see http://bsonspec.org/#/specification
GeoAMF (.geoamf) and Arc GeoServices AMF (.arcamf):
AMF0-encoded GeoJSON and Arc JSON, see:
http://opensource.adobe.com/wiki/download/attachments/1114283/amf0_spec_121207.pdf
Possible future supported formats might include KML and others. Get in touch
via Github to suggest other formats: http://github.com/migurski/TileStache.
Common parameters:
driver:
String used to identify an OGR driver. Currently, "ESRI Shapefile",
"PostgreSQL", "MySQL", "Oracle", "Spatialite" and "GeoJSON" are supported as
data source drivers, with "postgis" and "shapefile" accepted as synonyms.
Not case-sensitive.
OGR's complete list of potential formats can be found here:
http://www.gdal.org/ogr/ogr_formats.html. Feel free to get in touch via
Github to suggest new formats: http://github.com/migurski/TileStache.
parameters:
Dictionary of parameters for each driver.
PostgreSQL:
"dbname" parameter is required, with name of database.
"host", "user", and "password" are optional connection parameters.
One of "table" or "query" is required, with a table name in the first
case and a complete SQL query in the second.
Shapefile and GeoJSON:
"file" parameter is required, with filesystem path to data file.
properties:
Optional list or dictionary of case-sensitive output property names.
If omitted, all fields from the data source will be included in response.
If a list, treated as a whitelist of field names to include in response.
If a dictionary, treated as a whitelist and re-mapping of field names.
clipped:
Default is true.
Boolean flag for optionally clipping the output geometries to the
bounds of the enclosing tile, or the string value "padded" for clipping
to the bounds of the tile plus 5%. Clipping yields incomplete geometries
and dramatically smaller file sizes, and improves performance and
compatibility with Polymaps (http://polymaps.org).
projected:
Default is false.
Boolean flag for optionally returning geometries in projected rather than
geographic coordinates. Typically this means EPSG:900913 a.k.a. spherical
mercator projection. Stylistically a poor fit for GeoJSON, but useful
when returning Arc GeoServices responses.
precision:
Default is 6.
Optional number of decimal places to use for floating point values.
spacing:
Optional number of tile pixels for spacing geometries in responses. Used
to cut down on the number of returned features by ensuring that only those
features at least this many pixels apart are returned. Order of features
in the data source matters: early features beat out later features.
verbose:
Default is false.
Boolean flag for optionally expanding output with additional whitespace
for readability. Results in larger but more readable GeoJSON responses.
id_property:
Default is None.
Sets the id of the geojson feature to the specified field of the data source.
This can be used, for example, to identify a unique key field for the feature.
Example TileStache provider configuration:
"vector-postgis-points":
{
"provider": {"name": "vector", "driver": "PostgreSQL",
"parameters": {"dbname": "geodata", "user": "geodata",
"table": "planet_osm_point"}}
}
"vector-postgis-lines":
{
"provider": {"name": "vector", "driver": "postgis",
"parameters": {"dbname": "geodata", "user": "geodata",
"table": "planet_osm_line"}}
}
"vector-shapefile-points":
{
"provider": {"name": "vector", "driver": "ESRI Shapefile",
"parameters": {"file": "oakland-uptown-point.shp"},
"properties": ["NAME", "HIGHWAY"]}
}
"vector-shapefile-lines":
{
"provider": {"name": "vector", "driver": "shapefile",
"parameters": {"file": "oakland-uptown-line.shp"},
"properties": {"NAME": "name", "HIGHWAY": "highway"}}
}
"vector-postgis-query":
{
"provider": {"name": "vector", "driver": "PostgreSQL",
"parameters": {"dbname": "geodata", "user": "geodata",
"query": "SELECT osm_id, name, highway, way FROM planet_osm_line WHERE SUBSTR(name, 1, 1) = '1'"}}
}
"vector-sf-streets":
{
"provider": {"name": "vector", "driver": "GeoJSON",
"parameters": {"file": "stclines.json"},
"properties": ["STREETNAME"]}
}
Caveats:
Your data source must have a valid defined projection, or OGR will not know
how to correctly filter and reproject it. Although response tiles are typically
in web (spherical) mercator projection, the actual vector content of responses
is unprojected back to plain WGS84 latitude and longitude.
If you are using PostGIS and spherical mercator a.k.a. SRID 900913,
you can save yourself a world of trouble by using this definition:
http://github.com/straup/postgis-tools/raw/master/spatial_ref_900913-8.3.sql
""" |
{
'name': 'Web',
'category': 'Hidden',
'version': 'IP_ADDRESS',
'description':
"""
OpenERP Web core module.
========================
This module provides the core of the OpenERP Web Client.
""",
'depends': [],
'auto_install': True,
'post_load': 'wsgi_postload',
'js' : [
"static/src/fixbind.js",
"static/lib/datejs/globalization/en-US.js",
"static/lib/datejs/core.js",
"static/lib/datejs/parser.js",
"static/lib/datejs/sugarpak.js",
"static/lib/datejs/extras.js",
"static/lib/jquery/jquery-1.8.3.js",
"static/lib/jquery.MD5/jquery.md5.js",
"static/lib/jquery.form/jquery.form.js",
"static/lib/jquery.validate/jquery.validate.js",
"static/lib/jquery.ba-bbq/jquery.ba-bbq.js",
"static/lib/spinjs/spin.js",
"static/lib/jquery.autosize/jquery.autosize.js",
"static/lib/jquery.blockUI/jquery.blockUI.js",
"static/lib/jquery.placeholder/jquery.placeholder.js",
"static/lib/jquery.ui/js/jquery-ui-1.9.1.custom.js",
"static/lib/jquery.ui.timepicker/js/jquery-ui-timepicker-addon.js",
"static/lib/jquery.ui.notify/js/jquery.notify.js",
"static/lib/jquery.deferred-queue/jquery.deferred-queue.js",
"static/lib/jquery.scrollTo/jquery.scrollTo-min.js",
"static/lib/jquery.tipsy/jquery.tipsy.js",
"static/lib/jquery.textext/jquery.textext.js",
"static/lib/jquery.timeago/jquery.timeago.js",
"static/lib/qweb/qweb2.js",
"static/lib/underscore/underscore.js",
"static/lib/underscore/underscore.string.js",
"static/lib/backbone/backbone.js",
"static/lib/cleditor/jquery.cleditor.js",
"static/lib/py.js/lib/py.js",
"static/src/js/boot.js",
"static/src/js/testing.js",
"static/src/js/pyeval.js",
"static/src/js/corelib.js",
"static/src/js/coresetup.js",
"static/src/js/dates.js",
"static/src/js/formats.js",
"static/src/js/chrome.js",
"static/src/js/views.js",
"static/src/js/data.js",
"static/src/js/data_export.js",
"static/src/js/search.js",
"static/src/js/view_form.js",
"static/src/js/view_list.js",
"static/src/js/view_list_editable.js",
"static/src/js/view_tree.js",
],
'css' : [
"static/lib/jquery.ui.bootstrap/css/custom-theme/jquery-ui-1.9.0.custom.css",
"static/lib/jquery.ui.timepicker/css/jquery-ui-timepicker-addon.css",
"static/lib/jquery.ui.notify/css/ui.notify.css",
"static/lib/jquery.tipsy/tipsy.css",
"static/lib/jquery.textext/jquery.textext.css",
"static/src/css/base.css",
"static/src/css/data_export.css",
"static/lib/cleditor/jquery.cleditor.css",
],
'qweb' : [
"static/src/xml/*.xml",
],
'test': [
"static/test/testing.js",
"static/test/class.js",
"static/test/registry.js",
"static/test/form.js",
"static/test/data.js",
"static/test/list-utils.js",
"static/test/formats.js",
"static/test/rpc.js",
"static/test/evals.js",
"static/test/search.js",
"static/test/Widget.js",
"static/test/list.js",
"static/test/list-editable.js",
"static/test/mutex.js"
],
'bootstrap': True,
} |
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by NAME. All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by NAME.
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
"""
A multi-dimensional ``Vector`` class, take 3
A ``Vector`` is built from an iterable of numbers::
>>> Vector([3.1, 4.2])
Vector([3.1, 4.2])
>>> Vector((3, 4, 5))
Vector([3.0, 4.0, 5.0])
>>> Vector(range(10))
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
Tests with 2-dimensions (same results as ``vector2d_v1.py``)::
>>> v1 = Vector([3, 4])
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector([3.0, 4.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'd\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector([0, 0]))
(True, False)
Test of ``.frombytes()`` class method::
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0])
>>> v1 == v1_clone
True
Tests with 3-dimensions::
>>> v1 = Vector([3, 4, 5])
>>> x, y, z = v1
>>> x, y, z
(3.0, 4.0, 5.0)
>>> v1
Vector([3.0, 4.0, 5.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0, 5.0)
>>> abs(v1) # doctest:+ELLIPSIS
7.071067811...
>>> bool(v1), bool(Vector([0, 0, 0]))
(True, False)
Tests with many dimensions::
>>> v7 = Vector(range(7))
>>> v7
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
>>> abs(v7) # doctest:+ELLIPSIS
9.53939201...
Test of ``.__bytes__`` and ``.frombytes()`` methods::
>>> v1 = Vector([3, 4, 5])
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0, 5.0])
>>> v1 == v1_clone
True
Tests of sequence behavior::
>>> v1 = Vector([3, 4, 5])
>>> len(v1)
3
>>> v1[0], v1[len(v1)-1], v1[-1]
(3.0, 5.0, 5.0)
Test of slicing::
>>> v7 = Vector(range(7))
>>> v7[-1]
6.0
>>> v7[1:4]
Vector([1.0, 2.0, 3.0])
>>> v7[-1:]
Vector([6.0])
>>> v7[1,2]
Traceback (most recent call last):
...
TypeError: Vector indices must be integers
Tests of dynamic attribute access::
>>> v7 = Vector(range(10))
>>> v7.x
0.0
>>> v7.y, v7.z, v7.t
(1.0, 2.0, 3.0)
Dynamic attribute lookup failures::
>>> v7.k
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'k'
>>> v3 = Vector(range(3))
>>> v3.t
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 't'
>>> v3.spam
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'spam'
Tests of preventing attributes from 'a' to 'z'::
>>> v1.x = 7
Traceback (most recent call last):
...
AttributeError: readonly attribute 'x'
>>> v1.w = 7
Traceback (most recent call last):
...
AttributeError: can't set attributes 'a' to 'z' in 'Vector'
Other attributes can be set::
>>> v1.X = 'albatross'
>>> v1.X
'albatross'
>>> v1.ni = 'Ni!'
>>> v1.ni
'Ni!'
""" |
"""==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] == x[0][2]``, though the second case is less
efficient: a new temporary array is created after the first index,
and it is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. NumPy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrates best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
NumPy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 produces an
array of length 4 (the same as the index array) where each index is
replaced by the value that the array being indexed has at that position.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are with the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
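A sketch of the lookup-table case described above (the array names here
are hypothetical): ::
>>> lut = np.zeros((256, 3), dtype=np.uint8)  # hypothetical grayscale-to-RGB table
>>> lut[:, 0] = np.arange(256)                # red channel ramp
>>> image = np.array([[0, 255], [128, 64]], dtype=np.uint8)
>>> lut[image].shape
(2, 2, 3)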
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
Unlike in the case of integer index arrays, in the boolean case, the
result is a 1-D array containing all the elements in the indexed array
corresponding to all the true elements in the boolean array. The
elements in the indexed array are always iterated and returned in
:term:`row-major` (C-style) order. The result is also identical to
``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
of the data, not a view as one gets with slices.
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in loss of information if assigning
higher types to lower types (like floats to ints), or even
in exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
People often expect that the first location will be incremented by 3.
In fact, it is only incremented by 1. The reason is that
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
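When a repeated, unbuffered increment is what is wanted, the ufunc ``at``
method applies the operation once per index occurrence (a sketch;
``ufunc.at`` exists in NumPy 1.8 and later): ::
>>> x = np.arange(0, 50, 10)
>>> np.add.at(x, np.array([1, 1, 3, 1]), 1)
>>> x
array([ 0, 13, 20, 31, 40])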
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
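For instance, a small helper (hypothetical, for illustration) can build
such a tuple for any dimensionality: ::
>>> def first_element(arr):
...     return arr[(0,) * arr.ndim]  # one index per dimension
...
>>> first_element(np.arange(81).reshape(3, 3, 3, 3))
0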
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
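A short check of this (sketch): ::
>>> x = np.arange(10).reshape(2, 5)
>>> x[np.where(x > 6)]
array([7, 8, 9])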
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
""" |
"""
Wrappers to LAPACK library
==========================
flapack -- wrappers for Fortran [*] LAPACK routines
clapack -- wrappers for ATLAS LAPACK routines
calc_lwork -- calculate optimal lwork parameters
get_lapack_funcs -- query for wrapper functions.
[*] If ATLAS libraries are available then the Fortran routines
actually use ATLAS routines and should perform as well as
the ATLAS routines.
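A minimal usage sketch of get_lapack_funcs (assuming the modern
scipy.linalg location of the helper; the flapack/clapack module layout
differs between SciPy versions)::
import numpy as np
from scipy.linalg import get_lapack_funcs
a = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
gesv, = get_lapack_funcs(('gesv',), (a, b))  # selects dgesv for float64 input
lu, piv, x, info = gesv(a, b)                # solves a @ x = b; info == 0 on success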
Module flapack
++++++++++++++
In the following all function names are shown without
type prefix (s,d,c,z). Optimal values for lwork can
be computed using calc_lwork module.
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,overwrite_b=0)
inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,overwrite_b=0)
inv_a,info = potri(c,lower=0,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0)
Linear Least Squares (LLS) Problems
-----------------------------------
Drivers::
v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Computational routines::
qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0)
q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1)
Generalized Linear Least Squares (LSE and GLM) Problems
-------------------------------------------------------
Standard Eigenvalue and Singular Value Problems
-----------------------------------------------
Drivers::
w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0)
t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0)
wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0)
u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0)
Computational routines::
ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0)
ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0)
Generalized Eigenvalue and Singular Value Problems
--------------------------------------------------
Drivers::
w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
(alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,overwrite_c=0)
a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0)
Module clapack
++++++++++++++
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0)
inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0)
inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0)
Module calc_lwork
+++++++++++++++++
Optimal lwork is maxwrk. Default is minwrk.
minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1)
minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1)
minwrk,maxwrk = gelss(prefix,m,n,nrhs)
minwrk,maxwrk = getri(prefix,n)
minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1)
minwrk,maxwrk = heev(prefix,n,lower=0)
minwrk,maxwrk = syev(prefix,n,lower=0)
minwrk,maxwrk = gees(prefix,n,compute_v=1)
minwrk,maxwrk = geqrf(prefix,m,n)
minwrk,maxwrk = gqr(prefix,m,n)
""" |
"""
Display upcoming Google Calendar events.
This module will display information about upcoming Google Calendar events
in one of two formats which can be toggled with a button press. The event
URL may also be opened in a web browser with a button press.
Some event details can be retrieved from the Google Calendar API documentation.
https://developers.google.com/calendar/v3/reference/events
Configuration parameters:
auth_token: The path to where the access/refresh token will be saved
after successful credential authorization.
(default '~/.config/py3status/google_calendar.auth_token')
blacklist_events: Event names in this list will not be shown in the module
(case insensitive).
(default [])
browser_invocation: Command to run to open the browser. The curly braces stand for the URL to be opened.
(default "xdg-open {}")
button_open: Opens the event URL in the default web browser.
(default 3)
button_refresh: Refreshes the module and updates the list of events.
(default 2)
button_toggle: Toggles a boolean to hide/show the data for each event.
(default 1)
cache_timeout: How often the module is refreshed in seconds
(default 60)
client_secret: the path to your client_secret file which
contains your OAuth 2.0 credentials.
(default '~/.config/py3status/google_calendar.client_secret')
events_within_hours: Select events within the next given hours.
(default 12)
force_lowercase: Sets whether to force all event output to lower case.
(default False)
format: The format for module output.
(default '{events}|\\?color=event \u2687')
format_date: The format for date related format placeholders.
May be any Python strftime directives for dates.
(default '%a %d-%m')
format_event: The format for each event. The information can be toggled
with 'button_toggle' based on the value of 'is_toggled'.
*(default '[\\?color=event {summary}][\\?if=is_toggled ({start_time}'
' - {end_time}, {start_date})|[ ({location})][ {format_timer}]]')*
format_notification: The format for event warning notifications.
(default '{summary} {start_time} - {end_time}')
format_separator: The string used to separate individual events.
(default ' \\| ')
format_time: The format for time-related placeholders except `{format_timer}`.
May use any Python strftime directives for times.
(default '%I:%M %p')
format_timer: The format used for the {format_timer} placeholder to display
time until an event starts or time until an event in progress is over.
*(default '\\?color=time ([\\?if=days {days}d ][\\?if=hours {hours}h ]'
'[\\?if=minutes {minutes}m])[\\?if=is_current left]')*
ignore_all_day_events: Sets whether to display all day events or not.
(default False)
num_events: The maximum number of events to display.
(default 3)
preferred_event_link: The link to open in the browser.
Accepted values:
hangoutLink (open the VC room associated with the event),
htmlLink (open the event's details in Google Calendar).
Falls back to htmlLink if the preferred_event_link does not exist in the event.
(default "htmlLink")
response: Only display events for which the response status is
on the list.
The available values are listed in the Google Calendar API documentation;
look for attendees[].responseStatus.
(default ['accepted'])
thresholds: Thresholds for events. The first entry is the color for event 1,
the second for event 2, and so on.
(default [])
time_to_max: Threshold (in minutes) for when to display the `{format_timer}`
string; e.g. if time_to_max is 60, `{format_timer}` will only be
displayed for events starting in 60 minutes or less.
(default 180)
warn_threshold: The number of minutes until an event starts before a
warning is displayed to notify the user; e.g. if warn_threshold is 30
and an event is starting in 30 minutes or less, a notification will be
displayed. Disabled by default.
(default 0)
warn_timeout: The number of seconds before a warning should be issued again.
(default 300)
Control placeholders:
{is_toggled} a boolean toggled by button_toggle
Format placeholders:
{events} All the events to display.
format_event and format_notification placeholders:
{description} The description for the calendar event.
{end_date} The end date for the event.
{end_time} The end time for the event.
{location} The location for the event.
{start_date} The start date for the event.
{start_time} The start time for the event.
{summary} The summary (i.e. title) for the event.
{format_timer} The time until the event starts (or until it is over
if already in progress).
format_timer placeholders:
{days} The number of days until the event.
{hours} The number of hours until the event.
{minutes} The number of minutes until the event.
Color options:
color_event: Color for a single event.
color_time: Color for the time associated with each event.
Requires:
1. Python library google-api-python-client.
2. Python library python-dateutil.
3. OAuth 2.0 credentials for the Google Calendar api.
Follow Step 1 of the guide here to obtain your OAuth 2.0 credentials:
https://developers.google.com/google-apps/calendar/quickstart/python
Download the client_secret.json file which contains your client ID and
client secret. In your config file, set configuration parameter
client_secret to the path to your client_secret.json file.
The first time you run the module, a browser window will open asking you
to authorize access to your calendar. After authorization is complete,
an access/refresh token will be saved to the path configured in
auth_token, and i3status will be restarted. This restart will
occur only once after the first time you successfully authorize.
Examples:
```
# add color gradients for events and dates/times
google_calendar {
thresholds = {
'event': [(1, '#d0e6ff'), (2, '#bbdaff'), (3, '#99c7ff'),
(4, '#86bcff'), (5, '#62a9ff'), (6, '#8c8cff'), (7, '#7979ff')],
'time': [(1, '#ffcece'), (2, '#ffbfbf'), (3, '#ff9f9f'),
(4, '#ff7f7f'), (5, '#ff5f5f'), (6, '#ff3f3f'), (7, '#ff1f1f')]
}
}
```
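Another example (a sketch using only the parameters documented above):
```
# hide an event by name and prefer meeting links when opening events
google_calendar {
    blacklist_events = ['Lunch']
    preferred_event_link = 'hangoutLink'
}
```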
@author NAME
@license BSD
SAMPLE OUTPUT
[
{'full_text': "Homer's Birthday (742 Evergreen Terrace) (1h 23m) | "},
{'full_text': "Doctor's Appointment | Lunch with John"},
]
""" |
# -*- coding: utf-8 -*-
#'''
#QE pw.x Parsers
#=======================================
#Parsers for pw.x inputs and outputs
#'''
#from io import StringIO
#from exa import _pd as pd
#from exa import _np as np
#from exa.config import Config
#if Config.numba:
# from exa.jitted.indexing import idxs_from_starts_and_counts
#else:
# from exa.algorithms.indexing import idxs_from_starts_and_counts
#from atomic import Length
#from atomic.atom import Atom
#from atomic.frame import _min_frame_from_atom, Frame
#from qe.types import to_py_type, get_length
#from qe.classes import classes, Timings
#from qe.pw.classes import PWInput, PWOutput
#
#
#def parse_pw_input(path):
# '''
# '''
# kwargs = {}
# current = None
# with open(path) as f:
# for line in f:
# low = line.lower()
# split = low.split()
# block = split[0].replace('&', '')
# if block in classes.keys() and '=' not in line:
# current = block
# kwargs[current] = classes[block]()
# if len(split) > 1:
# kwargs[current]._info = ' '.join(split[1:])
# if '=' in line:
# k, v = line.split('=')
# k = k.strip()
# v = to_py_type(v)
# kwargs[current][k] = v
# elif '&' not in line and '/' not in line and current not in low:
# kwargs[current] = kwargs[current]._append(line)
# return PWInput(**kwargs)
#
#
#def parse_pw_output(path):
# '''
# Args:
# path (str): Output file path
#
# Returns:
# obj ()
# '''
# header = []
# footer = []
# scf = []
# eigs = []
# frames = []
# positions = []
# block = 'header'
# kwargs = {'header': [], 'footer': [], 'scf': [], 'eigs': [],
# 'frames': [], 'atom': [], 'forces': [], 'meta': []}
# with open(path) as f:
# for line in f:
# if 'Self-consistent Calculation' in line:
# block = 'scf'
# elif 'End of self-consistent calculation' in line:
# block = 'eigs'
# elif 'highest occupied level' in line or 'Fermi energy' in line:
# block = 'frames'
# elif 'Forces acting on atoms' in line:
# block = 'forces'
# elif 'ATOMIC_POSITIONS' in line:
# block = 'atom'
# elif 'Writing output' in line:
# block = 'meta'
# elif 'init_run' in line:
# block = 'footer'
# kwargs[block].append(line)
# timings = _parse_footer(kwargs['footer'])
# atom = _parse_atom(''.join(kwargs['atom']), ''.join(kwargs['forces']))
# frame = _parse_frame(kwargs['frames'], atom)
# out = PWOutput(timings=timings, atom=atom, frame=frame)
# return out, kwargs
#
#
#def _parse_footer(footer):
# '''
# '''
# data = {'category': [], 'called_by': [], 'name': [], 'cpu': [], 'wall': [], 'ncalls': []}
# called_by = ''
# category = 'summary'
# for line in footer:
# if 'Called by' in line:
# called_by = line.replace('Called by ', '').replace(':\n', '')
# if called_by != 'init_run':
# category = 'individual'
# elif 'routines' in line:
# called_by = line.split()[0].lower()
# category = 'individual'
# elif 'calls' in line:
# ls = line.split()
# name = ls[0]
# cpu = float(ls[2].replace('s', ''))
# wall = float(ls[4].replace('s', ''))
# ncalls = int(ls[7])
# data['category'].append(category)
# data['called_by'].append(called_by)
# data['name'].append(name)
# data['cpu'].append(cpu)
# data['wall'].append(wall)
# data['ncalls'].append(ncalls)
# df = Timings.from_dict(data)
# df.set_index(['category', 'name'], inplace=True)
# df['cpu'] = df['cpu'].map(lambda x: pd.Timedelta(seconds=x.astype(np.float64)))
# df['wall'] = df['wall'].map(lambda x: pd.Timedelta(seconds=x.astype(np.float64)))
# df.sort_index(inplace=True)
# return df
#
#
#def _parse_atom(atoms, forces, label=True):
# '''
# '''
# forces = StringIO(forces)
# forces = pd.read_csv(forces, delim_whitespace=True, header=None,
# names=['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'fx', 'fy', 'fz'])
# forces = forces.loc[(forces['n0'] == 'atom'), ['fx', 'fy', 'fz']].reset_index(drop=True)
# atom = StringIO(atoms)
# atom = pd.read_csv(atom, delim_whitespace=True, header=None,
# names=['symbol', 'x', 'y', 'z'])
# frames = atom[atom['symbol'] == 'ATOMIC_POSITIONS']
# unit = get_length(frames.iloc[0, 1])
# starts = frames.index.values + 1
# counts = (frames.index[1:] - frames.index[:-1]).values - 1
# counts = np.append(counts, atom.index.values[-1] - frames.index[-1] - 1)
# atom.dropna(inplace=True)
# atom.reset_index(inplace=True, drop=True)
# frame, lbl, indices = idxs_from_starts_and_counts(starts, counts)
# atom['symbol'] = atom['symbol'].astype('category')
# atom['frame'] = frame
# atom['label'] = lbl
# atom['fx'] = forces['fx']
# atom['fy'] = forces['fy']
# atom['fz'] = forces['fz']
# atom[['x', 'y', 'z', 'fx', 'fy', 'fz']] = atom[['x', 'y', 'z', 'fx', 'fy', 'fz']].astype(np.float64)
# conv = Length[unit, 'au']
# atom['x'] *= conv
# atom['y'] *= conv
# atom['z'] *= conv
# atom['fx'] *= conv
# atom['fy'] *= conv
# atom['fz'] *= conv
# atom.index.names = ['atom']
# return Atom(atom)
#
#
#def _parse_frame(data, atom):
# '''
# '''
# df = _min_frame_from_atom(atom)
# rows = {'energy': [], 'one_electron_energy': [], 'hartree_energy': [], 'xc_energy': [],
# 'ewald': [], 'smearing': []}
# for line in data:
# split = line.split()
# if 'total energy' in line and '!' in line:
# rows['energy'].append(split[4])
# elif 'one-electron' in line:
# rows['one_electron_energy'].append(split[3])
# elif 'hartree contribution' in line:
# rows['hartree_energy'].append(split[3])
# elif 'xc contribution' in line:
# rows['xc_energy'].append(split[3])
# elif 'ewald contribution' in line:
# rows['ewald'].append(split[3])
# elif 'smearing contrib' in line:
# rows['smearing'].append(split[4])
# frame = pd.DataFrame.from_dict(rows)
# frame = frame.astype(np.float64)
# for col in df.columns:
# frame[col] = df[col]
# return Frame(frame)
#
|
"""
This module contains the machinery handling assumptions.
All symbolic objects have assumption attributes that can be accessed via
.is_<assumption name> attribute.
Assumptions determine certain properties of symbolic objects and can
have 3 possible values: True, False, None. True is returned if the
object has the property and False is returned if it doesn't or can't
(i.e. doesn't make sense):
>>> from sympy import I
>>> I.is_algebraic
True
>>> I.is_real
False
>>> I.is_prime
False
When the property cannot be determined (or when a method is not
implemented), None will be returned; e.g. a generic symbol, x, may or
may not be positive, so a value of None is returned for x.is_positive.
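For example (shown with ``print``, since a bare None produces no
doctest output):
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> print(x.is_positive)
None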
By default, a symbolic value is in the largest set in the given context
without the property being specified. For example, a symbol declared to
be an integer is thereby also real, complex, etc.
Here follows a list of possible assumption names:
.. glossary::
commutative
object commutes with any other object with
respect to multiplication operation.
complex
object can have only values from the set
of complex numbers.
imaginary
object value is a number that can be written as a real
number multiplied by the imaginary unit ``I``. See
[3]_. Please note, that ``0`` is not considered to be an
imaginary number, see
`issue #7649 <https://github.com/sympy/sympy/issues/7649>`_.
real
object can have only values from the set
of real numbers.
integer
object can have only values from the set
of integers.
odd
even
object can have only values from the set of
odd (even) integers [2]_.
prime
object is a natural number greater than ``1`` that has
no positive divisors other than ``1`` and itself. See [6]_.
composite
object is a positive integer that has at least one positive
divisor other than ``1`` or the number itself. See [4]_.
zero
nonzero
object is zero (not zero).
rational
object can have only values from the set
of rationals.
algebraic
object can have only values from the set
of algebraic numbers [11]_.
transcendental
object can have only values from the set
of transcendental numbers [10]_.
irrational
object value cannot be represented exactly by Rational, see [5]_.
finite
infinite
object's absolute value is bounded (or, for infinite, is
arbitrarily large). See [7]_, [8]_, [9]_.
negative
nonnegative
object can have only negative (only
nonnegative) values [1]_.
positive
nonpositive
object can have only positive (only
nonpositive) values.
hermitian
antihermitian
object belongs to the field of hermitian
(antihermitian) operators.
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x', real=True); x
x
>>> x.is_real
True
>>> x.is_complex
True
See Also
========
.. seealso::
:py:class:`sympy.core.numbers.ImaginaryUnit`
:py:class:`sympy.core.numbers.Zero`
:py:class:`sympy.core.numbers.One`
Notes
=====
Assumption values are stored in obj._assumptions dictionary or
are returned by getter methods (with property decorators) or are
attributes of objects/classes.
References
==========
.. [1] http://en.wikipedia.org/wiki/Negative_number
.. [2] http://en.wikipedia.org/wiki/Parity_%28mathematics%29
.. [3] http://en.wikipedia.org/wiki/Imaginary_number
.. [4] http://en.wikipedia.org/wiki/Composite_number
.. [5] http://en.wikipedia.org/wiki/Irrational_number
.. [6] http://en.wikipedia.org/wiki/Prime_number
.. [7] http://en.wikipedia.org/wiki/Finite
.. [8] https://docs.python.org/3/library/math.html#math.isfinite
.. [9] http://docs.scipy.org/doc/numpy/reference/generated/numpy.isfinite.html
.. [10] http://en.wikipedia.org/wiki/Transcendental_number
.. [11] http://en.wikipedia.org/wiki/Algebraic_number
""" |
"""
This page is in the table of contents.
Skeiniso is an analysis script to display a gcode file in an isometric view.
The skeiniso manual page is at:
http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Skeiniso
==Operation==
The default 'Activate Skeiniso' checkbox is on. When it is on, the functions described below will work when called from the skeinforge toolchain; when it is off, the functions will not be called from the toolchain. The functions will still be called, whether or not the 'Activate Skeiniso' checkbox is on, when skeiniso is run directly. Skeiniso cannot separate the layers when it reads gcode without comments.
The viewer is simple, the viewpoint can only be moved in a sphere around the center of the model by changing the viewpoint latitude and longitude. Different regions of the model can be hidden by setting the width of the thread to zero. The alternating bands act as contour bands and their brightness and width can be changed.
==Settings==
===Animation===
====Animation Line Quickening====
Default is one.
The ratio of the speed of the tool animation to the speed of the actual tool.
====Animation Slide Show Rate====
Default is two layers per second.
The rate, in layers per second, at which the layer changes when the soar or dive button is pressed.
===Axis Rulings===
Default is on.
When selected, rulings will be drawn on the axis lines.
===Banding===
====Band Height====
Default is five layers.
Defines the height of the band in layers, a pair of bands is twice that height.
====Bottom Band Brightness====
Default is 0.7.
Defines the ratio of the brightness of the bottom band over the brightness of the top band. The higher it is the brighter the bottom band will be.
====Bottom Layer Brightness====
Default is one.
Defines the ratio of the brightness of the bottom layer over the brightness of the top layer. With a low bottom layer brightness ratio the bottom of the model will be darker than the top of the model, as if it was being illuminated by a light just above the top.
====Bright Band Start====
Default choice is 'From the Top'.
The button group that determines where the bright band starts from.
=====From the Bottom=====
When selected, the bright bands will start from the bottom.
=====From the Top=====
When selected, the bright bands will start from the top.
===Draw Arrows===
Default is on.
When selected, arrows will be drawn at the end of each line segment.
===Export Menu===
When the submenu in the export menu item in the file menu is clicked, an export canvas dialog will be displayed, which can export the canvas to a file.
===Go Around Extruder Off Travel===
Default is off.
When selected, the display will include the travel when the extruder is off, which means it will include the nozzle wipe path if any.
===Layers===
====Layer====
Default is zero.
On the display window, the Up button increases the 'Layer' by one, and the Down button decreases the layer by one. When the layer displayed in the layer spin box is changed then <Return> is hit, the layer shown will be set to the spin box value, to a minimum of zero and to a maximum of the highest index layer. The Soar button increases the layer at the 'Animation Slide Show Rate', and the Dive (double left arrow button beside the layer field) button decreases the layer at the slide show rate.
====Layer Extra Span====
Default is a huge number.
The viewer will draw the layers in the range including the 'Layer' index and the 'Layer' index plus the 'Layer Extra Span'. If the 'Layer Extra Span' is negative, the layers viewed will start at the 'Layer' index, plus the 'Layer Extra Span', and go up to and include the 'Layer' index. If the 'Layer Extra Span' is zero, only the 'Layer' index layer will be displayed. If the 'Layer Extra Span' is positive, the layers viewed will start at the 'Layer' index, and go up to and include the 'Layer' index plus the 'Layer Extra Span'.
===Line===
Default is zero.
The index of the selected line on the layer that is highlighted when the 'Display Line' mouse tool is chosen. The line spin box up button increases the 'Line' by one. If the line index of the layer goes over the index of the last line, the layer index will be increased by one and the new line index will be zero. The down button decreases the line index by one. If the line index goes below the index of the first line, the layer index will be decreased by one and the new line index will be at the last line. When the line displayed in the line field is changed then <Return> is hit, the line shown will be set to the line field value, to a minimum of zero and to a maximum of the highest index line. The Soar button increases the line at the speed at which the extruder would move, times the 'Animation Line Quickening' ratio, and the Dive (double left arrow button beside the line field) button decreases the line at the animation line quickening ratio.
===Mouse Mode===
Default is 'Display Line'.
The mouse tool can be changed from the 'Mouse Mode' menu button or picture button. The mouse tools listen to the arrow keys when the canvas has the focus. Clicking in the canvas gives the canvas the focus, and when the canvas has the focus a thick black border is drawn around the canvas.
====Display Line====
The 'Display Line' tool will display the highlight the selected line, and display the file line count, counting from one, and the gcode line itself. When the 'Display Line' tool is active, clicking the canvas will select the nearest line to the mouse click.
====Viewpoint Move====
The 'Viewpoint Move' tool will move the viewpoint in the xy plane when the mouse is clicked and dragged on the canvas.
====Viewpoint Rotate====
The 'Viewpoint Rotate' tool will rotate the viewpoint around the origin when the mouse is clicked and dragged on the canvas, or when the arrow keys have been used and <Return> is pressed. The viewpoint can also be moved by dragging the mouse. The viewpoint latitude will be increased when the mouse is dragged from the center towards the edge. The viewpoint longitude will be changed by the amount around the center the mouse is dragged. This is not very intuitive, but I don't know how to do this the intuitive way and I have other stuff to develop. If the shift key is pressed, only the angle that changed more is applied: if the latitude changed more than the longitude, only the latitude will be changed; if the longitude changed more, only the longitude will be changed.
===Number of Fill Layers===
====Number of Fill Bottom Layers====
Default is one.
The "Number of Fill Bottom Layers" is the number of layers at the bottom which will be colored olive.
====Number of Fill Top Layers====
Default is one.
The "Number of Fill Top Layers" is the number of layers at the top which will be colored blue.
===Scale===
Default is ten.
The scale setting is the scale of the image in pixels per millimeter; the higher the number, the greater the size of the display.
The zoom in mouse tool will zoom in the display at the point where the mouse was clicked, increasing the scale by a factor of two. The zoom out tool will zoom out the display at the point where the mouse was clicked, decreasing the scale by a factor of two.
===Screen Inset===
====Screen Horizontal Inset====
Default is one hundred.
The "Screen Horizontal Inset" determines how much the canvas will be inset in the horizontal direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be.
====Screen Vertical Inset====
Default is two hundred and twenty.
The "Screen Vertical Inset" determines how much the canvas will be inset in the vertical direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be..
===Viewpoint===
====Viewpoint Latitude====
Default is fifteen degrees.
The "Viewpoint Latitude" is the latitude of the viewpoint, a latitude of zero is the top pole giving a top view, a latitude of ninety gives a side view and a latitude of 180 gives a bottom view.
====Viewpoint Longitude====
Default is 210 degrees.
The "Viewpoint Longitude" is the longitude of the viewpoint.
===Width===
The width of each type of thread and of each axis can be changed. If the width is set to zero, the thread will not be visible.
====Width of Axis Negative Side====
Default is two.
Defines the width of the negative side of the axis.
====Width of Axis Positive Side====
Default is six.
Defines the width of the positive side of the axis.
====Width of Infill Thread====
Default is one.
The "Width of Infill Thread" sets the width of the green extrusion threads, those threads which are not loops and not part of the raft.
====Width of Fill Bottom Thread====
Default is two.
The "Width of Fill Bottom Thread" sets the width of the olive extrusion threads at the bottom of the model.
====Width of Fill Top Thread====
Default is two.
The "Width of Fill Top Thread" sets the width of the blue extrusion threads at the top of the model.
====Width of Loop Thread====
Default is three.
The "Width of Loop Thread" sets the width of the yellow loop threads, which are not perimeters.
====Width of Perimeter Inside Thread====
Default is eight.
The "Width of Perimeter Inside Thread" sets the width of the orange inside perimeter threads.
====Width of Perimeter Outside Thread====
Default is eight.
The "Width of Perimeter Outside Thread" sets the width of the red outside perimeter threads.
====Width of Raft Thread====
Default is one.
The "Width of Raft Thread" sets the width of the brown raft threads.
====Width of Selection Thread====
Default is six.
The "Width of Selection Thread" sets the width of the selected line.
====Width of Travel Thread====
Default is zero.
The "Width of Travel Thread" sets the width of the grey extruder off travel threads.
==Icons==
The dive, soar and zoom icons are from NAME's Silk icon set 1.3 at:
http://www.famfamfam.com/lab/icons/silk/
==Gcodes==
An explanation of the gcodes is at:
http://reprap.org/bin/view/Main/Arduino_GCode_Interpreter
and at:
http://reprap.org/bin/view/Main/MCodeReference
A gcode example is at:
http://forums.reprap.org/file.php?12,file=565
==Examples==
Below are examples of skeiniso being used. These examples are run in a terminal in the folder which contains Screw Holder_penultimate.gcode and skeiniso.py.
> python skeiniso.py
This brings up the skeiniso dialog.
> python skeiniso.py Screw Holder_penultimate.gcode
This brings up the skeiniso viewer to view the gcode file.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import skeiniso
>>> skeiniso.main()
This brings up the skeiniso dialog.
>>> skeiniso.getWindowAnalyzeFile('Screw Holder_penultimate.gcode')
This brings up the skeiniso viewer to view the gcode file.
""" |
"""Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. when different button callbacks all call
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
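For illustration, here is a minimal sketch (the class names are
illustrative, not part of Tkdnd; a real application would also give the
drag some visual feedback):

    import Tkinter
    import Tkdnd

    class Source:
        # Any application-specific object; only dnd_end() is required.
        def dnd_end(self, target, event):
            print 'drag ended on', target

    class Target(Tkinter.Frame):
        def dnd_accept(self, source, event):
            return self      # this widget is itself the target object
        def dnd_enter(self, source, event):
            self.configure(background='lightblue')
        def dnd_motion(self, source, event):
            pass
        def dnd_leave(self, source, event):
            self.configure(background='white')
        def dnd_commit(self, source, event):
            self.dnd_leave(source, event)
            print 'dropped', source

    root = Tkinter.Tk()
    target = Target(root, width=200, height=200, background='white')
    target.pack()
    # Pressing a mouse button over the target starts dragging a new Source.
    target.bind('<ButtonPress>', lambda event: Tkdnd.dnd_start(Source(), event))
    root.mainloop()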
""" |
"""
=============================
Byteswapping and byte order
=============================
Introduction to byte ordering and ndarrays
==========================================
The ``ndarray`` is an object that provides a Python array interface to data
in memory.
It often happens that the memory that you want to view with an array is
not of the same byte ordering as the computer on which you are running
Python.
For example, I might be working on a computer with a little-endian CPU -
such as an Intel Pentium, but I have loaded some data from a file
written by a computer that is big-endian. Let's say I have loaded 4
bytes from a file written by a Sun (big-endian) computer. I know that
these 4 bytes represent two 16-bit integers. On a big-endian machine, a
two-byte integer is stored with the Most Significant Byte (MSB) first,
and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
#. MSB integer 1
#. LSB integer 1
#. MSB integer 2
#. LSB integer 2
Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
>>> big_end_str
'\\x00\\x01\\x03\\x02'
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
770
Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
example, if our data represented a single unsigned 4-byte little-endian
integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
Returning to our ``big_end_arr`` - in this case our underlying data is
big-endian (data endianness) and we've set the dtype to match (the dtype
is also big-endian). However, sometimes you need to flip these around.
.. warning::
    Scalars currently do not include byte order information, so extracting
    a scalar from an array will return an integer in native byte order.
    Hence:
    >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
    True
Changing byte ordering
======================
As you can imagine from the introduction, there are two ways you can
affect the relationship between the byte ordering of the array and the
underlying memory it is looking at:
* Change the byte-ordering information in the array dtype so that it
  interprets the underlying data as being in a different byte order.
  This is the role of ``arr.newbyteorder()``.
* Change the byte-ordering of the underlying data, leaving the dtype
  interpretation as it was. This is what ``arr.byteswap()`` does.
The common situations in which you need to change byte ordering are:
#. Your data and dtype endianness don't match, and you want to change
   the dtype so that it matches the data.
#. Your data and dtype endianness don't match, and you want to swap the
   data so that they match the dtype.
#. Your data and dtype endianness match, but you want the data swapped
   and the dtype to reflect this.
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------
We make something where they don't match:
>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256
The obvious fix for this situation is to change the dtype so it gives
the correct endianness:
>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1
Note the array has not changed in memory:
>>> fixed_end_dtype_arr.tobytes() == big_end_str
True
Data and dtype endianness don't match, change data to match dtype
------------------------------------------------------------------
You might want to do this if you need the data in memory to be a certain
ordering. For example you might be writing the memory out to a file
that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1
Now the array *has* changed in memory:
>>> fixed_end_mem_arr.tobytes() == big_end_str
False
Data and dtype endianness match, swap data and dtype
----------------------------------------------------
You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense. In this case you just do both of
the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
An easier way of casting the data to a specific dtype and byte ordering
can be achieved with the ndarray astype method:
>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False
""" |
#!/usr/bin/env python
#############################
# ChaLearn AutoML challenge #
#############################
# Usage: python run.py input_dir output_dir
# This sample code can be used either
# - to submit RESULTS deposited in the res/ subdirectory or
# - as a template for CODE submission.
#
# The input directory input_dir contains 5 subdirectories named by dataset,
# including:
# dataname/dataname_feat.type -- the feature type "Numerical", "Binary", or "Categorical" (Note: if this file is absent, get the feature type from the dataname.info file)
# dataname/dataname_public.info -- parameters of the data and task, including metric and time_budget
# dataname/dataname_test.data -- training, validation and test data (solutions/target values are given for training data only)
# dataname/dataname_train.data
# dataname/dataname_train.solution
# dataname/dataname_valid.data
#
# The output directory will receive the predicted values (no subdirectories):
# dataname_test_000.predict -- Provide predictions at regular intervals to make sure you get some results even if the program crashes
# dataname_test_001.predict
# dataname_test_002.predict
# ...
# dataname_valid_000.predict
# dataname_valid_001.predict
# dataname_valid_002.predict
# ...
#
# Result submission:
# =================
# Search for @RESULT to locate that part of the code.
# ** Always keep this code. **
# If the subdirectory res/ contains result files (predicted values)
# the code just copies them to the output and does not train/test models.
# If no results are found, a model is trained and tested (see code submission).
#
# Code submission:
# ===============
# Search for @CODE to locate that part of the code.
# ** You may keep or modify this template or substitute your own code. **
# The program saves predictions regularly. This way the program produces
# at least some results if it dies (or is terminated) prematurely.
# This also allows us to plot learning curves. The last result is used by the
# scoring program.
# We implemented 2 classes:
# 1) DATA LOADING:
# ------------
# Use/modify
# D = DataManager(basename, input_dir, ...)
# to load and preprocess data.
# Missing values --
# Our default method for replacing missing values is trivial: they are replaced by 0.
# We also add extra indicator features where missing values occurred. This doubles the number of features.
# Categorical variables --
# The location of potential Categorical variables is indicated in D.feat_type.
# NOTHING special is done about them in this sample code.
# Feature selection --
# We only implemented an ad hoc feature selection filter efficient for the
# dorothea dataset to show that performance improves significantly
# with that filter. It takes effect only for binary classification problems with sparse
# matrices as input and unbalanced classes.
# 2) LEARNING MACHINE:
# ----------------
# Use/modify
# M = MyAutoML(D.info, ...)
# to create a model.
# Number of base estimators --
# Our models are ensembles. Adding more estimators may improve their accuracy.
# Use M.model.n_estimators = num
# Training --
# M.fit(D.data['X_train'], D.data['Y_train'])
# Fit the parameters and hyper-parameters (all inclusive!)
# What we implemented hard-codes hyper-parameters; you probably want to
# optimize them. Also, we made a somewhat arbitrary choice of models
# for the various types of data, just to give some baseline results.
# You probably want to do better model selection and/or add your own models.
# Testing --
# Y_valid = M.predict(D.data['X_valid'])
# Y_test = M.predict(D.data['X_test'])
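#
# Putting the two classes together, a minimal end-to-end sketch built only
# from the calls shown above (basename and input_dir are placeholders):
#
# D = DataManager(basename, input_dir)
# M = MyAutoML(D.info)
# M.fit(D.data['X_train'], D.data['Y_train'])
# Y_valid = M.predict(D.data['X_valid'])
# Y_test = M.predict(D.data['X_test'])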
#
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
#
# Main contributors: NAME and NAME March-October 2014
# Originally inspired by code: NAME Kaggle, March 2013
# Modified by NAME and NAME Microsoft, December 2013
# Modified by NAME November 2015
# =========================== BEGIN USER OPTIONS ==============================
# Verbose mode:
##############
# Recommended to keep verbose = True: shows various progression messages
|
"""
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
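For instance, a short sketch of integrating dy/dt = -y with :meth:`rk4`
(assuming the historical signature rk4(derivs, y0, t), where derivs(y, t)
returns the derivative)::

  import numpy as np
  from matplotlib import mlab

  t = np.linspace(0.0, 5.0, 101)
  y = mlab.rk4(lambda y, t: -y, 1.0, t)  # y[i] approximates exp(-t[i])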
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
""" |
#!/usr/bin/env python3
############################################################################
#
# MODULE: i.cutlinesmod
# AUTHOR(S): Moritz NAME with help of NAME, modified by NAME
# PURPOSE: Create tiles the borders of which do not cut across semantically
# meaningful objects
# COPYRIGHT: (C) 1997-2018 by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#############################################################################
#%Module
#% description: Creates semantically meaningful tile borders
#% keyword: imagery
#% keyword: tiling
#%end
#
#%option G_OPT_R_INPUT
#% description: Raster map to use as input for tiling
#% required: yes
#%end
#
#%option G_OPT_V_OUTPUT
#% description: Name of output vector map with cutline polygons
#%end
#
#%option
#% key: number_lines
#% type: integer
#% description: Number of tile border lines in each direction
#% required: yes
#%end
#
#%option
#% key: edge_detection
#% type: string
#% description: Edge detection algorithm to use
#% options: zc,canny
#% answer: zc
#% required: yes
#%end
#
#%option G_OPT_V_INPUTS
#% key: existing_cutlines
#% label: Input vector maps with existing cutlines
#% required: no
#%end
#
#%option
#% key: no_edge_friction
#% type: integer
#% description: Additional friction for non-edge pixels
#% required: yes
#% answer: 5
#%end
#
#%option
#% key: lane_border_multiplier
#% type: integer
#% description: Multiplier for borders of lanes compared to non-edge pixels
#% required: yes
#% answer: 10
#%end
#
#%option
#% key: min_tile_size
#% type: integer
#% description: Minimum size of tiles in map units
#% required: no
#%end
#
#%option
#% key: zc_threshold
#% type: double
#% label: Sensitivity of Gaussian filter (i.zc)
#% answer: 1
#% required: no
#% guisection: Zero-crossing
#%end
#
#%option
#% key: zc_width
#% type: integer
#% label: x-y extent of the Gaussian filter (i.zc)
#% answer: 9
#% required: no
#% guisection: Zero-crossing
#%end
#
#%option
#% key: canny_low_threshold
#% type: double
#% label: Low threshold for edges (i.edge)
#% answer: 3
#% required: no
#% guisection: Canny
#%end
#
#%option
#% key: canny_high_threshold
#% type: double
#% label: High threshold for edges (i.edge)
#% answer: 10
#% required: no
#% guisection: Canny
#%end
#
#%option
#% key: canny_sigma
#% type: double
#% label: Kernel radius (i.edge)
#% answer: 2
#% required: no
#% guisection: Canny
#%end
#
#%option
#% key: tile_width
#% type: integer
#% description: Width of tiles for tiled edge detection (pixels)
#% required: no
#% guisection: Parallel processing
#%end
#
#%option
#% key: tile_height
#% type: integer
#% description: Height of tiles for tiled edge detection (pixels)
#% required: no
#% guisection: Parallel processing
#%end
#
#%option
#% key: overlap
#% type: integer
#% description: Overlap between tiles for tiled edge detection (pixels)
#% required: no
#% answer: 1
#% guisection: Parallel processing
#%end
#
#%option
#% key: processes
#% type: integer
#% description: Number of parallel processes
#% answer: 1
#% required: yes
#% guisection: Parallel processing
#%end
#
#%option
#% key: memory
#% type: integer
#% description: RAM memory available (in MB)
#% answer: 300
#% required: yes
#%end
#
#%rules
#% collective: tile_width, tile_height, overlap
#%end
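#
# Example: a hypothetical invocation from within a GRASS session, assuming a
# raster map named 'ortho' exists in the current mapset:
#
#   i.cutlinesmod input=ortho output=cutlines number_lines=10 \
#       edge_detection=canny min_tile_size=1000 processes=4 memory=1000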
|
"""
Wrappers to LAPACK library
==========================
flapack -- wrappers for Fortran [*] LAPACK routines
clapack -- wrappers for ATLAS LAPACK routines
calc_lwork -- calculate optimal lwork parameters
get_lapack_funcs -- query for wrapper functions.
[*] If ATLAS libraries are available then the Fortran routines
actually use ATLAS routines and should perform as well as
the ATLAS routines.
Module flapack
++++++++++++++
In the following all function names are shown without
type prefix (s,d,c,z). Optimal values for lwork can
be computed using the calc_lwork module.
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
lub,piv,x,info = gbsv(kl,ku,ab,b,overwrite_ab=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,overwrite_b=0)
inv_a,info = getri(lu,piv,lwork=min_lwork,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,overwrite_b=0)
inv_a,info = potri(c,lower=0,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,overwrite_c=0)
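For example, the gesv driver above can be called directly. A minimal
sketch (the type-prefixed name for double precision is dgesv; note that
in later SciPy releases these wrappers live in scipy.linalg.lapack)::

  import numpy as np
  from scipy.linalg import flapack

  a = np.array([[3., 1.], [1., 2.]])
  b = np.array([[9.], [8.]])
  lu, piv, x, info = flapack.dgesv(a, b)  # solves a*x = b; info == 0 on success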
Linear Least Squares (LLS) Problems
-----------------------------------
Drivers::
v,x,s,rank,info = gelss(a,b,cond=-1.0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Computational routines::
qr,tau,info = geqrf(a,lwork=min_lwork,overwrite_a=0)
q,info = orgqr|ungqr(qr,tau,lwork=min_lwork,overwrite_qr=0,overwrite_tau=1)
Generalized Linear Least Squares (LSE and GLM) Problems
-------------------------------------------------------
Standard Eigenvalue and Singular Value Problems
-----------------------------------------------
Drivers::
w,v,info = syev|heev(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevd|heevd(a,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0)
w,v,info = syevr|heevr(a,compute_v=1,lower=0,vrange=,irange=,atol=-1.0,lwork=min_lwork,overwrite_a=0)
t,sdim,(wr,wi|w),vs,info = gees(select,a,compute_v=1,sort_t=0,lwork=min_lwork,select_extra_args=(),overwrite_a=0)
wr,(wi,vl|w),vr,info = geev(a,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0)
u,s,vt,info = gesdd(a,compute_uv=1,lwork=min_lwork,overwrite_a=0)
Computational routines::
ht,tau,info = gehrd(a,lo=0,hi=n-1,lwork=min_lwork,overwrite_a=0)
ba,lo,hi,pivscale,info = gebal(a,scale=0,permute=0,overwrite_a=0)
Generalized Eigenvalue and Singular Value Problems
--------------------------------------------------
Drivers::
w,v,info = sygv|hegv(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
w,v,info = sygvd|hegvd(a,b,itype=1,compute_v=1,lower=0,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
(alphar,alphai|alpha),beta,vl,vr,info = ggev(a,b,compute_vl=1,compute_vr=1,lwork=min_lwork,overwrite_a=0,overwrite_b=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,overwrite_c=0)
a = laswp(a,piv,k1=0,k2=len(piv)-1,off=0,inc=1,overwrite_a=0)
Module clapack
++++++++++++++
Linear Equations
----------------
Drivers::
lu,piv,x,info = gesv(a,b,rowmajor=1,overwrite_a=0,overwrite_b=0)
c,x,info = posv(a,b,lower=0,rowmajor=1,overwrite_a=0,overwrite_b=0)
Computational routines::
lu,piv,info = getrf(a,rowmajor=1,overwrite_a=0)
x,info = getrs(lu,piv,b,trans=0,rowmajor=1,overwrite_b=0)
inv_a,info = getri(lu,piv,rowmajor=1,overwrite_lu=0)
c,info = potrf(a,lower=0,clean=1,rowmajor=1,overwrite_a=0)
x,info = potrs(c,b,lower=0,rowmajor=1,overwrite_b=0)
inv_a,info = potri(c,lower=0,rowmajor=1,overwrite_c=0)
inv_c,info = trtri(c,lower=0,unitdiag=0,rowmajor=1,overwrite_c=0)
Auxiliary routines
------------------
a,info = lauum(c,lower=0,rowmajor=1,overwrite_c=0)
Module calc_lwork
+++++++++++++++++
Optimal lwork is maxwrk. Default is minwrk.
minwrk,maxwrk = gehrd(prefix,n,lo=0,hi=n-1)
minwrk,maxwrk = gesdd(prefix,m,n,compute_uv=1)
minwrk,maxwrk = gelss(prefix,m,n,nrhs)
minwrk,maxwrk = getri(prefix,n)
minwrk,maxwrk = geev(prefix,n,compute_vl=1,compute_vr=1)
minwrk,maxwrk = heev(prefix,n,lower=0)
minwrk,maxwrk = syev(prefix,n,lower=0)
minwrk,maxwrk = gees(prefix,n,compute_v=1)
minwrk,maxwrk = geqrf(prefix,m,n)
minwrk,maxwrk = gqr(prefix,m,n)
""" |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#"""
#You must run this test module using nose (chant nosetests from the command line)
#** There are some issues with nose, offset by the fact that it does multi-thread and setup_module better than unittest
#* This is NOT a TestCase ... it could be except that unittest screws up setup_module
#* nosetests may hang in some ERROR conditions. SIGHUP, SIGINT and SIGSTP are not noticed. SIGKILL (-9) works
#* You should NOT pass command line arguments to nosetests. You can pass them, but it causes trouble:
#* Nosetests passes them into the test environment which breaks socorro's configuration behavior
#* You can set NOSE_WHATEVER envariables prior to running if you need to. See nosetests --help
#* some useful envariables:
#* NOSE_VERBOSE=x where x in [0,      # Prints only 'OK' at end of test run
#*                            1,      # default: Prints one '.' per test like unittest
#*                            x >= 2, # Prints first comment line if exists, else the function name per test
#*                           ]
#* NOSE_WHERE=directory_path[,directoroy_path[,...]] : run only tests in these directories. Note commas
#* NOSE_ATTR=attrspec[,attrspec ...] : run only tests for which at least one attrspec evaluates true.
#* Accepts '!attr' and 'attr=False'. Does NOT accept natural python syntax ('attr != True', 'not attr')
#* NOSE_NOCAPTURE=TrueValue : nosetests normally captures stdout and only displays it if the test fails or errors.
#* print debugging works with this envariable, or you can instead print to stderr or use a logger
#*
#* With NOSE_VERBOSE > 1, you may see "functionName(self): (slow=N)" for some tests. N is the max seconds waiting
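#* For instance, a typical invocation combining several of these (the test
#* directory path is hypothetical): NOSE_VERBOSE=2 NOSE_NOCAPTURE=1 NOSE_WHERE=socorro/unittest nosetests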
#"""
#import copy
#import datetime as dt
#import errno
#import logging
#import logging.handlers
#import os
#import re
#import shutil
#import signal
#import threading
#import time
#import traceback
#import psycopg2
#from nose.tools import *
#import socorro.database.postgresql as soc_pg
#import socorro.database.database as sdatabase
#import socorro.lib.ConfigurationManager as configurationManager
#import socorro.monitor.monitor as monitor
#import socorro.unittest.testlib.createJsonDumpStore as createJDS
#import socorro.unittest.testlib.dbtestutil as dbtestutil
#from socorro.unittest.testlib.testDB import TestDB
#from socorro.unittest.testlib.util import runInOtherProcess
#import socorro.unittest.testlib.util as tutil
#from socorro.lib.datetimeutil import utc_now
#import monitorTestconfig as testConfig
#import socorro.database.schema as schema
#class Me: # not quite "self"
#"""
#I need stuff to be initialized once per module. Rather than having a bazillion globals, let's just have 'me'
#"""
#pass
#me = None
#loglineS = '^[1-9][0-9]{3}-[0-9]{2}-[0-9]{2}.*'
#loglineRE = re.compile(loglineS)
#def setup_module():
#global me
#if me:
#return
## else initialize
## print "MODULE setup"
#me = Me()
#me.markingTemplate = "MARK %s: %s"
#me.startMark = 'start'
#me.endMark = 'end'
#me.testDB = TestDB()
#me.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing Monitor')
#tutil.nosePrintModule(__file__)
#myDir = os.path.split(__file__)[0]
#if not myDir: myDir = '.'
#replDict = {'testDir':'%s'%myDir}
#for i in me.config:
#try:
#me.config[i] = me.config.get(i)%(replDict)
#except:
#pass
#knownTests = [x for x in dir(TestMonitor) if x.startswith('test')]
#me.logWasExtracted = {}
#for t in knownTests:
#me.logWasExtracted[t] = False
#me.logger = monitor.logger
#me.logger.setLevel(logging.DEBUG)
#me.logFilePathname = me.config.logFilePathname
#logfileDir = os.path.split(me.config.logFilePathname)[0]
#try:
#os.makedirs(logfileDir)
#except OSError,x:
#if errno.EEXIST != x.errno: raise
#f = open(me.config.logFilePathname,'w')
#f.close()
#fileLog = logging.FileHandler(me.logFilePathname, 'a')
#fileLog.setLevel(logging.DEBUG)
#fileLogFormatter = logging.Formatter(me.config.logFileLineFormatString)
#fileLog.setFormatter(fileLogFormatter)
#me.logger.addHandler(fileLog)
#me.database = sdatabase.Database(me.config)
##me.dsn = "host=%s dbname=%s user=%s password=%s" % (me.config.databaseHost,me.config.databaseName,
##me.config.databaseUserName,me.config.databasePassword)
#def teardown_module():
#global me
#logging.shutdown()
#try:
#os.unlink(me.logFilePathname)
#except OSError,x:
#if errno.ENOENT != x.errno:
#raise
#class TestMonitor:
#markingLog = False
#def setUp(self):
#global me
#self.connection = me.database.connection()
##self.connection = psycopg2.connect(me.dsn)
## just in case there was a crash on prior run
#me.testDB.removeDB(me.config,me.logger)
#me.testDB.createDB(me.config,me.logger)
#def tearDown(self):
#global me
##import socorro.database.postgresql as db_pg #DEBUG
##print "\ntearDown",db_pg.connectionStatus(self.connection)
#me.testDB.removeDB(me.config,me.logger)
##try:
##shutil.rmtree(me.config.storageRoot)
##except OSError,x:
##pass
##try:
##shutil.rmtree(me.config.deferredStorageRoot)
##except OSError,x:
##pass
##try:
##if me.config.saveSuccessfulMinidumpsTo:
##shutil.rmtree(me.config.saveSuccessfulMinidumpsTo)
##except OSError,x:
##pass
##try:
##if me.config.saveFailedMinidumpsTo:
##shutil.rmtree(me.config.saveFailedMinidumpsTo)
##except OSError,x:
##pass
#self.connection.close()
#def markLog(self):
#global me
#testName = traceback.extract_stack()[-2][2]
#if TestMonitor.markingLog:
#TestMonitor.markingLog = False
#me.logger.info(me.markingTemplate%(testName,me.endMark))
## print (' ==== <<%s>> '+me.markingTemplate)%(os.getpid(),testName,me.endMark) #DEBUG
#else:
#TestMonitor.markingLog = True
#me.logger.info(me.markingTemplate%(testName,me.startMark))
## print (' ==== <<%s>> '+me.markingTemplate)%(os.getpid(),testName,me.startMark) #DEBUG
#def extractLogSegment(self):
#global me
#testName = traceback.extract_stack()[-2][2]
## print ' ==== <<%s>> EXTRACTING: %s (%s)'%(os.getpid(),testName,me.logWasExtracted[testName]) #DEBUG
#if me.logWasExtracted[testName]:
#return []
#try:
#file = open(me.config.logFilePathname)
#except IOError,x:
#if errno.ENOENT != x.errno:
#raise
#else:
#return []
#me.logWasExtracted[testName] = True
#startTag = me.markingTemplate%(testName,me.startMark)
#stopTag = me.markingTemplate%(testName,me.endMark)
#lines = file.readlines()
#segment = []
#i = 0
#while i < len(lines):
#if not startTag in lines[i]:
#i += 1
#continue
#else:
#i += 1
#try:
#while not stopTag in lines[i]:
#segment.append(lines[i].strip())
#i += 1
#except IndexError:
#pass
#break
#return segment
#def testConstructor(self):
#"""
#testConstructor(self):
#Constructor must fail if any of a lot of configuration details are missing
#Constructor must succeed if all config is present
#Constructor should never log anything
#"""
## print 'TEST: testConstructor'
#global me
#requiredConfigs = [
#"databaseHost",
#"databaseName",
#"databaseUserName",
#"databasePassword",
##"storageRoot",
##"deferredStorageRoot",
##"jsonFileSuffix",
##"dumpFileSuffix",
#"processorCheckInTime",
#"standardLoopDelay",
#"cleanupJobsLoopDelay",
#"priorityLoopDelay",
##"saveSuccessfulMinidumpsTo",
##"saveFailedMinidumpsTo",
#]
#cc = copy.copy(me.config)
#self.markLog()
#for rc in requiredConfigs:
#del(cc[rc])
#try:
#m = monitor.Monitor(cc)
#assert False, "expected to raise some kind of exception for missing %s" % (rc)
#except Exception,x:
#pass
#cc[rc] = me.config[rc]
#monitor.Monitor(me.config) # expect this to work. If it raises an error, we'll see it
#self.markLog()
#seg = self.extractLogSegment()
#cleanSeg = []
#for line in seg:
#if 'Constructor has set the following values' in line:
#continue
#if line.startswith('self.'):
#continue
#if 'creating crashStorePool' in line:
#continue
#cleanSeg.append(line)
#assert [] == cleanSeg, 'expected no logging for constructor call (success or failure) but %s'%(str(cleanSeg))
#def runStartChild(self):
#global me
#try:
#m = monitor.Monitor(me.config)
#m.start()
#me.logger.fail("This line forces a wrong count in later assertions: We expected a SIGTERM before getting here.")
## following sequence of except: handles both 2.4.x and 2.5.x hierarchy
#except SystemExit,x:
#me.logger.info("CHILD SystemExit in %s: %s [%s]"%(threading.currentThread().getName(),type(x),x))
#os._exit(0)
#except KeyboardInterrupt,x:
#me.logger.info("CHILD KeyboardInterrupt in %s: %s [%s]"%(threading.currentThread().getName(),type(x),x))
#os._exit(0)
#except Exception,x:
#me.logger.info("CHILD Exception in %s: %s [%s]"%(threading.currentThread().getName(),type(x),x))
#os._exit(0)
#def testStart(self):
#"""
#testStart(self): (slow=2)
#This test may run for a second or two
#start does:
#a lot of logging ... and there really isn't much else to test, so we are testing that. Ugh.
#For this one, we won't pay attention to what stops the threads
#"""
#global me
#self.markLog()
#runInOtherProcess(self.runStartChild,logger=me.logger)
#self.markLog()
#seg = self.extractLogSegment()
#prior = ''
#dateWalk = 0
#connectionClosed = 0
#priorityConnect = 0
#priorityQuit = 0
#priorityDone = 0
#cleanupStart = 0
#cleanupQuit = 0
#cleanupDone = 0
#for i in seg:
#data = i.split(None,4)
#if 4 < len(data):
#date,tyme,level,dash,msg = i.split(None,4)
#else:
#msg = i
#if msg.startswith('MainThread'):
#if 'connection' in msg and 'closed' in msg: connectionClosed += 1
#if 'destructiveDateWalk' in msg: dateWalk += 1
#elif msg.startswith('priorityLoopingThread'):
#if 'connecting to database' in msg: priorityConnect += 1
#if 'detects quit' in msg: priorityQuit += 1
#if 'priorityLoop done' in msg: priorityDone += 1
#elif msg.startswith('jobCleanupThread'):
#if 'jobCleanupLoop starting' in msg: cleanupStart += 1
#if 'got quit' in msg: cleanupQuit += 1
#if 'jobCleanupLoop done' in msg: cleanupDone += 1
#assert 2 == dateWalk, 'expect logging for start and end of destructiveDateWalk, got %d'%(dateWalk)
#assert 2 == connectionClosed, 'expect two connection close messages, got %d' %(connectionClosed)
#assert 1 == priorityConnect, 'priorityLoop had better connect to database exactly once, got %d' %(priorityConnect)
#assert 1 == priorityQuit, 'priorityLoop should detect quit exactly once, got %d' %(priorityQuit)
#assert 1 == priorityDone, 'priorityLoop should report self done exactly once, got %d' %(priorityDone)
#assert 1 == cleanupStart, 'jobCleanup should report start exactly once, got %d' %(cleanupStart)
#assert 1 == cleanupQuit, 'jobCleanup should report quit exactly once, got %d' %(cleanupQuit)
#assert 1 == cleanupDone, 'jobCleanup should report done exactly once, got %d' %(cleanupDone)
#def testRespondToSIGHUP(self):
#"""
#testRespondToSIGHUP(self): (slow=1)
#This test may run for a second or two
#We should notice a SIGHUP and die nicely. This is exactly like testStart except that we look
#for different logging events (ugh)
#"""
#global me
#self.markLog()
#runInOtherProcess(self.runStartChild,logger=me.logger,signal=signal.SIGHUP)
#self.markLog()
#seg = self.extractLogSegment()
#kbd = 0
#sighup = 0
#sigterm = 0
#for line in seg:
#print line
#if loglineRE.match(line):
#date,tyme,level,dash,msg = line.split(None,4)
#if msg.startswith('MainThread'):
#if 'KeyboardInterrupt' in msg: kbd += 1
#if 'SIGHUP detected' in msg: sighup += 1
#if 'SIGTERM detected' in msg: sigterm += 1
#assert 1 == kbd, 'Better see exactly one keyboard interrupt, got %d' % (kbd)
#assert 1 == sighup, 'Better see exactly one sighup event, got %d' % (sighup)
#assert 0 == sigterm, 'Better not see sigterm event, got %d' % (sigterm)
#def testRespondToSIGTERM(self):
#"""
#testRespondToSIGTERM(self): (slow=1)
#This test may run for a second or two
#We should notice a SIGTERM and die nicely. This is exactly like testStart except that we look
#for different logging events (ugh)
#"""
#global me
#self.markLog()
#runInOtherProcess(self.runStartChild,signal=signal.SIGTERM)
#self.markLog()
#seg = self.extractLogSegment()
#kbd = 0
#sighup = 0
#sigterm = 0
#for line in seg:
#if loglineRE.match(line):
#date,tyme,level,dash,msg = line.split(None,4)
#if msg.startswith('MainThread'):
#if 'KeyboardInterrupt' in msg: kbd += 1
#if 'SIGTERM detected' in msg: sigterm += 1
#if 'SIGHUP detected' in msg: sighup += 1
#assert 1 == kbd, 'Better see exactly one keyboard interrupt, got %d' % (kbd)
#assert 1 == sigterm, 'Better see exactly one sigterm event, got %d' % (sigterm)
#assert 0 == sighup, 'Better not see sighup event, got %d' % (sighup)
#def testQuitCheck(self):
#"""
#testQuitCheck(self):
#This test makes sure that the main loop notices when it has been told to quit.
#"""
#global me
#mon = monitor.Monitor(me.config)
#mon.quit = True
#assert_raises(KeyboardInterrupt,mon.quitCheck)
#def quitter(self):
#time.sleep(self.timeTilQuit)
#self.mon.quit = True
#def testResponsiveSleep(self):
#"""
#testResponsiveSleep(self): (slow=4)
#This test may run for some few seconds. Shouldn't be more than 6 tops (and if so, it will have failed).
#Tests that the responsiveSleep method actually responds by raising KeyboardInterrupt.
#"""
#global me
#mon = monitor.Monitor(me.config)
#self.timeTilQuit = 2
#self.mon = mon
#quitter = threading.Thread(name='Quitter', target=self.quitter)
#quitter.start()
#assert_raises(KeyboardInterrupt,mon.responsiveSleep,5)
#quitter.join()
#def testGetDatabaseConnectionPair(self):
#"""
#testGetDatabaseConnectionPair(self):
#test that the wrapper for psycopghelper.DatabaseConnectionPool works as expected
#"""
#global me
#mon = monitor.Monitor(me.config)
#tcon,tcur = mon.getDatabaseConnectionPair()
#mcon,mcur = mon.databaseConnectionPool.connectionCursorPair()
#try:
#assert tcon == mcon
#assert tcur != mcur
#finally:
#mon.databaseConnectionPool.cleanup()
##def testGetStorageFor(self):
##"""
##testGetStorageFor(self):
##Test that the wrapper for JsonDumpStorage doesn't twist things incorrectly
##"""
##global me
##self.markLog()
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##createJDS.createTestSet(createJDS.jsonMoreData,jsonKwargs={'logger':me.logger},rootDir=me.config.deferredStorageRoot)
##mon = monitor.Monitor(me.config)
##assert_raises(monitor.UuidNotFoundException,mon.getStorageFor,'nothing')
##expected = me.config.storageRoot.rstrip(os.sep)
##got = mon.getStorageFor('0bba929f-8721-460c-dead-a43c20071025').root
##assert expected == got, 'Expected [%s] got [%s]'%(expected,got)
##expected = me.config.deferredStorageRoot.rstrip(os.sep)
##got = mon.getStorageFor('29adfb61-f75b-11dc-b6be-001320081225').root
##assert expected == got, 'Expected [%s] got [%s]'%(expected,got)
##self.markLog()
##seg = self.extractLogSegment()
##cleanSeg = []
##for lline in seg:
##line = lline.strip()
##if 'Constructor has set the following values' in line:
##continue
##if 'DEBUG - MainThread - getJson' in line:
##continue
##if line.startswith('self.'):
##continue
##cleanSeg.append(line)
##assert [] == cleanSeg, 'unexpected logging for this test: %s'%(str(cleanSeg))
##def testRemoveBadUuidFromJsonDumpStorage(self):
##"""
##testRemoveBadUuidFromJsonDumpStorage(self):
##This just wraps JsonDumpStorage. Assure we aren't futzing up the wrap (fail with non-exist uuid)
##"""
##global me
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##mon = monitor.Monitor(me.config)
##badUuid = '0bad0bad-0bad-6666-9999-0bad20001025'
##assert_raises(monitor.UuidNotFoundException,mon.removeUuidFromJsonDumpStorage,badUuid)
##def testRemoveGoodUuidFromJsonDumpStorage(self):
##"""
##testRemoveGoodUuidFromJsonDumpStorage(self):
##This really just wraps JsonDumpStorage call. Assure we aren't futzing up the wrap (succeed with existing uuids)
##"""
##global me
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##createJDS.createTestSet(createJDS.jsonMoreData,jsonKwargs={'logger':me.logger},rootDir=me.config.deferredStorageRoot)
##mon = monitor.Monitor(me.config)
##goodUuid = '0b781b88-ecbe-4cc4-dead-6bbb20081225';
### this should work the first time...
##mon.removeUuidFromJsonDumpStorage(goodUuid)
### ... and then fail the second time
##assert_raises(monitor.UuidNotFoundException,mon.removeUuidFromJsonDumpStorage, goodUuid)
#def testCompareSecondOfSequence(self):
#"""
#testCompareSecondOfSequence(self):
#Not much to test, but do it
#"""
#x = (1,10)
#y = (10,1)
#assert cmp(x,y) < 0 # check assumptions about cmp...
#assert monitor.Monitor.compareSecondOfSequence(x,y) > 0
#assert cmp(y,x) > 0
#assert monitor.Monitor.compareSecondOfSequence(y,x) < 0
#def testJobSchedulerIterNoProcs(self):
#"""
#testJobSchedulerIterNoProcs(self):
#Assure that attempts at balanced scheduling with no processor raises monitor.NoProcessorsRegisteredException
#"""
#global me
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#iter = m.jobSchedulerIter(dbCur)
#assert_raises(SystemExit,iter.next)
#finally:
#m.databaseConnectionPool.cleanup()
## def testJobScheduleIter_AllOldProcessors(self):
## """
## testJobScheduleIter_AllOldProcessors(self):
## If we have only old processors, we should fail (but as of 2009-january, don't: Test is commented out)
## """
## global me
## m = monitor.Monitor(me.config)
## dbCon,dbCur = m.getDatabaseConnectionPair()
## stamp = utc_now() - dt.timedelta(minutes=10)
## dbtestutil.fillProcessorTable(dbCur, 5, stamp=stamp)
## iter = m.jobSchedulerIter(dbCur)
## assert_raises(WhatKind? iter.next)
#def testJobSchedulerIterGood(self):
#"""
#testJobSchedulerIterGood(self):
#Plain vanilla test of the balanced job scheduler.
#"""
#global me
#numProcessors = 15
#dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#iter = m.jobSchedulerIter(dbCur)
#dbCon.commit()
#num = 0
#hits = dict(((1+x,0) for x in range (numProcessors)))
#for id in iter:
#num += 1
#hits[int(id)] += 1
#if num >= numProcessors: break
#for i in range(numProcessors):
#assert hits[i+1] == 1, 'At index %d, got count %d'%(i+1, hits[i+1])
#for id in iter:
#num += 1
#hits[int(id)] += 1
#if num >= 3*numProcessors: break
#finally:
#m.databaseConnectionPool.cleanup()
#for i in range(numProcessors):
#assert hits[i+1] == 3, 'At index %d, got count %d'%(i+1, hits[i+1])
#def getCurrentProcessorList(self):
#"""Useful for figuring out what is there before we call some method or other."""
#global me
#sql = "select p.id, count(j.*) from processors p left join (select owner from jobs where success is null) as j on p.id = j.owner group by p.id;"
#cur = self.connection.cursor()
#cur.execute(sql);
#self.connection.commit()
#return [(aRow[0], aRow[1]) for aRow in cur.fetchall()] #processorId, numberOfAssignedJobs
#def testJobScheduleIter_StartUnbalanced(self):
#"""
#testJobScheduleIter_StartUnbalanced(self):
#Assure that an unbalanced start eventually produces balanced result
#"""
#numProcessors = 5
#dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
#self.connection.commit()
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#dbtestutil.addSomeJobs(dbCur,dict([(1+x,1+x) for x in range(numProcessors)]),logger=me.logger)
#iter = m.jobSchedulerIter(dbCur)
#num = 0
#hits = dict(((1+x,0) for x in range (numProcessors)))
#for id in iter:
#num += 1
#hits[int(id)] += 1
#me.logger.debug('HIT on %d: %d'%(id,hits[id]))
#if num >= 3*numProcessors: break
#for i in range(numProcessors):
#assert hits[i+1] == 5 - i, 'Expected num hits to be count down sequence from 5 to 1, but at idx %d, got %d'%(i+1,hits[i+1])
#me.logger.debug('ONE: At index %d, got count %d'%(i+1, hits[i+1]))
#finally:
#m.databaseConnectionPool.cleanup()
## def testJobScheduleIter_SomeOldProcessors(self):
## """
## testJobScheduleIter_SomeOldProcessors(self):
## If we have some old processors, be sure we don't see them in the iter
## As of 2009-January, that is not the case, so we have commented this test.
## """
## global me
## m = monitor.Monitor(me.config)
## dbCon,dbCur = m.etDatabaseConnectionPair() error: try:...(dbCon)...finally m.databaseConnectionPool.cleanup()
## now = utc_now() error: Use dbtestutil.datetimeNow(aCursor)
## then = now - dt.timedelta(minutes=10)
## dbtestutil.fillProcessorTable(dbCur, None, processorMap = {1:then,2:then,3:now,4:then,5:then })
## iter = m.jobScheduleIter(dbCur)
## hits = dict(((x,0) for x in range (1,6)))
## num = 0;
## for id in iter:
## num += 1
## hits[int(id)] += 1
## if num > 3: break
## for i in (1,2,4,5):
## assert hits[i] == 0, 'Expected that no old processors would be used in the iterator'
## assert hits[3] == 4, 'Expected that all the iterations would choose the one live processor'
#def testUnbalancedJobSchedulerIterNoProcs(self):
#"""
#testUnbalancedJobSchedulerIterNoProcs(self):
#With no processors, we will get a system exit
#"""
#global me
#m = monitor.Monitor(me.config)
#cur = self.connection.cursor()
#try:
#iter = m.unbalancedJobSchedulerIter(cur)
#assert_raises(SystemExit, iter.next)
#finally:
#self.connection.commit()
#def testUnbalancedJobSchedulerIter_AllOldProcs(self):
#"""
#testUnbalancedJobSchedulerIter_AllOldProcs(self):
#With only processors that are too old, we will get a system exit
#"""
#global me
#m = monitor.Monitor(me.config)
#cur = self.connection.cursor()
#try:
#stamp = dbtestutil.datetimeNow(cur) - dt.timedelta(minutes=10)
#dbtestutil.fillProcessorTable(cur, 5, stamp=stamp)
#iter = m.unbalancedJobSchedulerIter(cur)
#assert_raises(SystemExit, iter.next)
#finally:
#self.connection.commit()
#def testUnbalancedJobSchedulerIter_SomeOldProcs(self):
#"""
#testUnbalancedJobSchedulerIter_SomeOldProcs(self):
#With some processors that are too old, we will get only the young ones in some order
#"""
#global me
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#now = dbtestutil.datetimeNow(dbCur)
#then = now - dt.timedelta(minutes=10)
#dbtestutil.fillProcessorTable(dbCur, None, processorMap = {1:then,2:then,3:now,4:then,5:then })
#iter = m.unbalancedJobSchedulerIter(dbCur)
#hits = dict(((x,0) for x in range (1,6)))
#num = 0;
#for id in iter:
#num += 1
#hits[int(id)] += 1
#if num > 3: break
#for i in (1,2,4,5):
#assert hits[i] == 0, 'Expected that no old processors would be used in the iterator'
#assert hits[3] == 4, 'Expected that all the iterations would choose the one live processor'
#finally:
#m.databaseConnectionPool.cleanup()
#def testUnbalancedJobSchedulerIter(self):
#"""
#testUnbalancedJobSchedulerIter(self):
#With an unbalanced load on the processors, each processor still gets the same number of hits
#"""
#global me
#numProcessors = 5
#loopCount = 3
#dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
#self.connection.commit()
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#dbtestutil.addSomeJobs(dbCur,{1:12},logger=me.logger)
#iter = m.unbalancedJobSchedulerIter(dbCur)
#num = 0
#hits = dict(((1+x,0) for x in range (numProcessors)))
#for id in iter:
#num += 1
#hits[int(id)] += 1
#if num >= loopCount*numProcessors: break
#for i in range(numProcessors):
#assert hits[i+1] == loopCount, 'expected %d for processor %d, but got %d'%(loopCount,i+1,hits[i+1])
#finally:
#m.databaseConnectionPool.cleanup()
#def setJobSuccess(self, cursor, idTimesAndSuccessSeq):
#global me
#sql = "UPDATE jobs SET starteddatetime = %s, completeddatetime = %s, success = %s WHERE id = %s"
#for row in idTimesAndSuccessSeq:
#if row[2]: row[2] = True
#if not row[2]: row[2] = False
#cursor.executemany(sql,idTimesAndSuccessSeq)
#cursor.connection.commit()
#sql = 'SELECT id, uuid, success FROM jobs ORDER BY id'
#cursor.execute(sql)
#return cursor.fetchall()
#def jobsAllocated(self):
#global me
#m = monitor.Monitor(me.config)
#cur = self.connection.cursor()
#sql = "SELECT count(*) from jobs"
#cur.execute(sql)
#self.connection.commit()
#return cur.fetchone()[0]
##def testCleanUpCompletedAndFailedJobs_WithSaves(self):
##"""
##testCleanUpCompletedAndFailedJobs_WithSaves(self):
##The default config asks for successful and failed jobs to be saved
##"""
##global me
##cursor = self.connection.cursor()
##dbtestutil.fillProcessorTable(cursor,4)
##m = monitor.Monitor(me.config)
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##runInOtherProcess(m.standardJobAllocationLoop, stopCondition=(lambda : self.jobsAllocated() == 14),logger=me.logger)
##started = dbtestutil.datetimeNow(cursor)
##self.connection.commit()
##completed = started + dt.timedelta(microseconds=100)
##idTimesAndSuccessSeq = [
##[started,completed,True,1],
##[started,completed,True,3],
##[started,completed,True,5],
##[started,completed,True,11],
##[started,None,False,2],
##[started,None,False,4],
##[started,None,False,8],
##[started,None,False,12],
##]
##dbCon,dbCur = m.getDatabaseConnectionPair()
##try:
##jobdata = self.setJobSuccess(dbCur,idTimesAndSuccessSeq)
##m.cleanUpCompletedAndFailedJobs()
##finally:
##m.databaseConnectionPool.cleanup()
##successSave = set()
##failSave = set()
##expectSuccessSave = set()
##expectFailSave = set()
##remainBehind = set()
##for dir, dirs, files in os.walk(me.config.storageRoot):
##remainBehind.update(os.path.splitext(x)[0] for x in files)
##for d in idTimesAndSuccessSeq:
##if d[2]:
##expectSuccessSave.add(d[3])
##else:
##expectFailSave.add(d[3])
##for dir,dirs,files in os.walk(me.config.saveSuccessfulMinidumpsTo):
##successSave.update((os.path.splitext(x)[0] for x in files))
##for dir,dirs,files in os.walk(me.config.saveFailedMinidumpsTo):
##failSave.update((os.path.splitext(x)[0] for x in files))
##for x in jobdata:
##if None == x[2]:
##assert not x[1] in failSave and not x[1] in successSave, "if we didn't set success state for %s, then it wasn't copied"%(x[1])
##assert x[1] in remainBehind, "if we didn't set success state for %s, then it should remain behind"%(x[1])
##assert not x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
##elif True == x[2]:
##assert not x[1] in failSave and x[1] in successSave, "if we set success for %s, it is copied to %s"%(x[1],me.config.saveSuccessfulMinidumpsTo)
##assert not x[0] in expectFailSave and x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
##assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
##elif False == x[2]:
##assert x[1] in failSave and not x[1] in successSave, "if we set failure for %s, it is copied to %s"%(x[1],me.config.saveFailedMinidumpsTo)
##assert x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
##assert not x[1] in remainBehind, "if we did set success state for %s, then it should not remain behind"%(x[1])
##def testCleanUpCompletedAndFailedJobs_WithoutSaves(self):
##"""
##testCleanUpCompletedAndFailedJobs_WithoutSaves(self):
##First, dynamically set config to not save successful or failed jobs. They are NOT removed from the file system
##"""
##global me
##cc = copy.copy(me.config)
##cursor = self.connection.cursor()
##dbtestutil.fillProcessorTable(cursor,4)
##for conf in ['saveSuccessfulMinidumpsTo','saveFailedMinidumpsTo']:
##cc[conf] = ''
##m = monitor.Monitor(cc)
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##runInOtherProcess(m.standardJobAllocationLoop, stopCondition=(lambda : self.jobsAllocated() == 14),logger=me.logger)
##started = dbtestutil.datetimeNow(cursor)
##self.connection.commit()
##completed = started + dt.timedelta(microseconds=100)
##idTimesAndSuccessSeq = [
##[started,completed,True,1],
##[started,completed,True,3],
##[started,completed,True,5],
##[started,completed,True,11],
##[started,None,False,2],
##[started,None,False,4],
##[started,None,False,8],
##[started,None,False,12],
##]
##dbCon,dbCur = m.getDatabaseConnectionPair()
##try:
##jobdata = self.setJobSuccess(dbCur,idTimesAndSuccessSeq)
##m.cleanUpCompletedAndFailedJobs()
##finally:
##m.databaseConnectionPool.cleanup()
##successSave = set()
##failSave = set()
##expectSuccessSave = set()
##expectFailSave = set()
##for d in idTimesAndSuccessSeq:
##if d[2]:
##expectSuccessSave.add(d[3])
##else:
##expectFailSave.add(d[3])
##for dir,dirs,files in os.walk(me.config.saveSuccessfulMinidumpsTo):
##successSave.update((os.path.splitext(x)[0] for x in files))
##for dir,dirs,files in os.walk(me.config.saveFailedMinidumpsTo):
##failSave.update((os.path.splitext(x)[0] for x in files))
##remainBehind = set()
##for dir, dirs, files in os.walk(me.config.storageRoot):
##remainBehind.update(os.path.splitext(x)[0] for x in files)
##assert len(successSave) == 0, "We expect not to save any successful jobs with this setting"
##assert len(failSave) == 0, "We expect not to save any failed jobs with this setting"
##for x in jobdata:
##if None == x[2]:
##assert not x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
##assert x[1] in remainBehind, "if we didn't set success state for %s, then it should remain behind"%(x[1])
##elif True == x[2]:
##assert not x[0] in expectFailSave and x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
##elif False == x[2]:
##assert x[0] in expectFailSave and not x[0] in expectSuccessSave, "database should match expectations for id=%s"%(x[0])
#def testCleanUpDeadProcessors_AllDead(self):
#"""
#testCleanUpDeadProcessors_AllDead(self):
#As of 2009-01-xx, Monitor.cleanUpDeadProcessors(...) does nothing except write to a log file
#... and fail if there are no live processors
#"""
#global me
#m = monitor.Monitor(me.config)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#now = dbtestutil.datetimeNow(dbCur)
#then = now - dt.timedelta(minutes=10)
#dbtestutil.fillProcessorTable(dbCur, None, processorMap = {1:then,2:then,3:then,4:then,5:then })
#assert_raises(SystemExit,m.cleanUpDeadProcessors, dbCur)
#finally:
#m.databaseConnectionPool.cleanup()
#def testQueueJob(self):
#"""
#testQueueJob(self):
#make sure jobs table starts empty
#make sure returned values reflect database values
#make sure assigned processors are correctly reflected
#make sure duplicate uuid is caught, reported, and work continues
#"""
#global me
#m = monitor.Monitor(me.config)
#sql = 'SELECT pathname,uuid,owner from jobs;'
#numProcessors = 4
#dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#procIdGenerator = m.jobSchedulerIter(dbCur)
#dbCur.execute(sql)
#beforeJobsData = dbCur.fetchall()
#assert 0 == len(beforeJobsData), 'There should be no queued jobs before we start our run'
#expectedHits = dict(((1+x,0) for x in range (numProcessors)))
#mapper = {}
#hits = dict(((1+x,0) for x in range (numProcessors)))
#for uuid,data in createJDS.jsonFileData.items():
#procId = m.queueJob(dbCur,uuid,procIdGenerator)
#expectedHits[procId] += 1
#mapper[uuid] = procId
#dbCur.execute(sql)
#afterJobsData = dbCur.fetchall()
#for row in afterJobsData:
#hits[row[2]] += 1
##me.logger.debug("ASSERT %s == %s for index %s"%(mapper.get(row[1],'WHAT?'), row[2], row[1]))
#assert mapper[row[1]] == row[2], 'Expected %s from %s but got %s'%(mapper.get(row[1],"WOW"),row[1],row[2])
#for key in expectedHits.keys():
##me.logger.debug("ASSERTING %s == %s for index %s"%(expectedHits.get(key,'BAD KEY'),hits.get(key,'EVIL KEY'),key))
#assert expectedHits[key] == hits[key], "Expected count of %s for %s, but got %s"%(expectedHits[key],key,hits[key])
#self.markLog()
#dupUuid = createJDS.jsonFileData.keys()[0]
#try:
#procId = m.queueJob(dbCur,dupUuid,procIdGenerator)
#assert False, "Expected that IntegrityError would be raised queue-ing %s but it wasn't"%(dupUuid)
#except psycopg2.IntegrityError:
#pass
#except Exception,x:
#assert False, "Expected that only IntegrityError would be raised, but got %s: %s"%(type(x),x)
#self.markLog()
#finally:
#m.databaseConnectionPool.cleanup()
#def testQueuePriorityJob(self):
#"""
#testQueuePriorityJob(self):
#queuePriorityJob does:
#removes job uuid from priorityjobs table (if possible)
#add uuid to priority_jobs_NNN table for NNN the processor id
#add uuid, id, etc to jobs table with priority > 0
#"""
#global me
#m = monitor.Monitor(me.config)
#numProcessors = 4
#dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
#data = dbtestutil.makeJobDetails({1:2,2:2,3:3,4:3})
#dbCon,dbCur = m.getDatabaseConnectionPair()
#try:
#procIdGenerator = m.jobSchedulerIter(dbCur)
#insertSql = "INSERT into priorityjobs (uuid) VALUES (%s);"
#uuidToId = {}
#for tup in data:
#uuidToId[tup[1]] = tup[2]
#uuids = uuidToId.keys()
#for uuid in uuids:
#if uuidToId[uuid]%2:
#dbCur.execute(insertSql,[uuid])
#dbCon.commit()
#countSql = "SELECT count(*) from %s;"
#dbCur.execute(countSql%('priorityjobs'))
#priorityJobCount = dbCur.fetchone()[0]
#dbCur.execute(countSql%('jobs'))
#jobCount = dbCur.fetchone()[0]
#eachPriorityJobCount = {}
#for uuid in uuids:
#procId = m.queuePriorityJob(dbCur,uuid, procIdGenerator)
#dbCur.execute('SELECT count(*) from jobs where jobs.priority > 0')
#assert dbCur.fetchone()[0] == 1 + jobCount, 'Expect that each queuePriority will increase jobs table by one'
#jobCount += 1
#try:
#eachPriorityJobCount[procId] += 1
#except KeyError:
#eachPriorityJobCount[procId] = 1
#if uuidToId[uuid]%2:
#dbCur.execute(countSql%('priorityjobs'))
#curCount = dbCur.fetchone()[0]
#assert curCount == priorityJobCount -1, 'Expected to remove one job from priorityjobs for %s'%uuid
#priorityJobCount -= 1
#for id in eachPriorityJobCount.keys():
#dbCur.execute(countSql%('priority_jobs_%s'%id))
#count = dbCur.fetchone()[0]
#assert eachPriorityJobCount[id] == count, 'Expected that the count %s added to id %s matches %s found'%(eachPriorityJobCount[id],id,count)
#finally:
#m.databaseConnectionPool.cleanup()
#def testGetPriorityUuids(self):
#"""
#testGetPriorityUuids(self):
#Check that we find none if the priorityjobs table is empty
#Check that we find as many as we put into priorityjobs table
#"""
#global me
#m = monitor.Monitor(me.config)
#count = len(m.getPriorityUuids(self.connection.cursor()))
#assert 0 == count, 'Expect no priorityjobs unless they were added. Got %d'%(count)
#data = dbtestutil.makeJobDetails({1:2,2:2,3:3,4:3})
#insertSql = "INSERT into priorityjobs (uuid) VALUES (%s);"
#self.connection.cursor().executemany(insertSql,[ [x[1]] for x in data ])
#self.connection.commit()
#count = len(m.getPriorityUuids(self.connection.cursor()))
#self.connection.commit()
#assert len(data) == count,'expect same count in data as priorityJobs, got %d'%(count)
##def testLookForPriorityJobsAlreadyInQueue(self):
##"""
##testLookForPriorityJobsAlreadyInQueue(self):
##Check that we erase jobs from priorityjobs table if they are there
##Check that we increase by one the priority in jobs table
##Check that we add job (only) to appropriate priority_jobs_NNN table
##Check that attempting same uuid again raises IntegrityError
##"""
##global me
##numProcessors = 5
##dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
##m = monitor.Monitor(me.config)
##data = dbtestutil.makeJobDetails({1:2,2:2,3:3,4:3,5:2})
##dbCon,dbCur = m.getDatabaseConnectionPair()
##try:
##procIdGenerator = m.jobSchedulerIter(dbCur)
##insertSql = "INSERT into priorityjobs (uuid) VALUES (%s);"
##updateSql = "UPDATE jobs set priority = 1 where uuid = %s;"
##allUuids = [x[1] for x in data]
#priorityJobUuids = []
##missingUuids = []
##uuidToProcId = {}
##for counter in range(len(allUuids)):
##uuid = allUuids[counter]
##if 0 == counter % 3: # add to jobs and priorityjobs table
##uuidToProcId[uuid] = m.queueJob(dbCur,data[counter][1],procIdGenerator)
##priorityJobUuids.append((uuid,))
##elif 1 == counter % 3: # add to jobs table only
##uuidToProcId[uuid] = m.queueJob(dbCur,data[counter][1],procIdGenerator)
##else: # 2== counter %3 # don't add anywhere
##missingUuids.append(uuid)
##dbCur.executemany(insertSql,priorityJobUuids)
##dbCon.commit()
##for uuid in priorityJobUuids:
##dbCur.execute(updateSql,uuid) # each entry is already a one-element tuple
##self.markLog()
##m.lookForPriorityJobsAlreadyInQueue(dbCur,allUuids)
##self.markLog()
##seg = self.extractLogSegment()
##for line in seg:
##date,tyme,level,dash,thr,ddash,msg = line.split(None,6)
##assert thr == 'MainThread','Expected only MainThread log lines, got[%s]'%(line)
##uuid = msg.split()[2]
##assert not uuid in missingUuids, 'Found %s that should not be in missingUuids'%(uuid)
##assert uuid in uuidToProcId.keys(), 'Found %s that should be in uuidToProcId'%(uuid)
##countSql = "SELECT count(*) from %s;"
##dbCur.execute(countSql%('priorityjobs'))
##priCount = dbCur.fetchone()[0]
##assert 0 == priCount, 'Expect that all the priority jobs are removed, but found %s'%(priCount)
##countSql = "SELECT count(*) from priority_jobs_%s WHERE uuid = %%s;"
##for uuid,procid in uuidToProcId.items():
##dbCur.execute(countSql%(procid),(uuid,))
##priCount = dbCur.fetchone()[0]
##assert priCount == 1, 'Expect to find %s in priority_jobs_%s exactly once'%(uuid,procid)
##for badid in range(1,numProcessors+1):
##if badid == procid: continue
##dbCur.execute(countSql%(badid),(uuid,))
##badCount = dbCur.fetchone()[0]
##assert 0 == badCount, 'Expect to find %s ONLY in its own priority_jobs table, but found it in priority_jobs_%s'%(uuid,badid)
##for uuid,procid in uuidToProcId.items():
##try:
##m.lookForPriorityJobsAlreadyInQueue(dbCur,(uuid,))
##assert False, 'Expected line above would raise IntegrityError'
##except psycopg2.IntegrityError,x:
##dbCon.rollback()
##except:
##assert False, 'Expected only IntegrityError from the try block'
##finally:
##m.databaseConnectionPool.cleanup()
##def testUuidInJsonDumpStorage(self):
##"""
##testUuidInJsonDumpStorage(self):
##Test that the wrapper for JsonDumpStorage isn't all twisted up:
##assure we find something in normal and deferred storage, and miss something that isn't there
##do NOT test that the 'markAsSeen' actually works: That should be testJsonDumpStorage's job
##"""
##global me
##m = monitor.Monitor(me.config)
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##createJDS.createTestSet(createJDS.jsonMoreData,jsonKwargs={'logger':me.logger},rootDir=me.config.deferredStorageRoot)
##self.markLog()
##badUuid = '0bad0bad-0bad-6666-9999-0bad20001025'
##goodUuid = '0bba929f-8721-460c-dead-a43c20071025'
##defUuid = '29adfb61-f75b-11dc-b6be-001320081225'
##assert m.uuidInJsonDumpStorage(goodUuid), 'Dunno how that happened'
##assert m.uuidInJsonDumpStorage(defUuid), 'Dunno how that happened'
##assert not m.uuidInJsonDumpStorage(badUuid), 'Dunno how that happened'
##self.markLog()
##seg = self.extractLogSegment()
##cleanSeg = []
##for lline in seg:
##if 'DEBUG - MainThread - getJson ' in lline:
##continue
##cleanSeg.append(lline)
##assert [] == cleanSeg, "Shouldn't log for success or failure: %s"%cleanSeg
##def testLookForPriorityJobsInJsonDumpStorage(self):
##"""
##testLookForPriorityJobsInJsonDumpStorage(self):
##assure that we can find each uuid in standard and deferred storage
##assure that we do not find any bogus uuid
##assure that found uuids are added to jobs table with priority 1, and priority_jobs_NNN table for processor id NNN
##"""
##global me
##m = monitor.Monitor(me.config)
##createJDS.createTestSet(createJDS.jsonFileData,jsonKwargs={'logger':me.logger},rootDir=me.config.storageRoot)
##createJDS.createTestSet(createJDS.jsonMoreData,jsonKwargs={'logger':me.logger},rootDir=me.config.deferredStorageRoot)
##normUuids = createJDS.jsonFileData.keys()
##defUuids = createJDS.jsonMoreData.keys()
##allUuids = []
##allUuids.extend(normUuids)
##allUuids.extend(defUuids)
##badUuid = '0bad0bad-0bad-6666-9999-0bad20001025'
##dbCon,dbCur = m.getDatabaseConnectionPair()
##try:
##numProcessors = 5
##dbtestutil.fillProcessorTable(self.connection.cursor(),numProcessors)
##self.markLog()
##m.lookForPriorityJobsInJsonDumpStorage(dbCur,allUuids)
##assert [] == allUuids, 'Expect that all the uuids were found and removed from the looked for "set"'
##m.lookForPriorityJobsInJsonDumpStorage(dbCur,(badUuid,))
##self.markLog()
##seg = self.extractLogSegment()
##getIdAndPrioritySql = "SELECT owner,priority from jobs WHERE uuid = %s"
##getCountSql = "SELECT count(*) from %s"
##idCounts = dict( ( (x,0) for x in range(1,numProcessors+1) ) )
##allUuids.extend(normUuids)
##allUuids.extend(defUuids)
##for uuid in allUuids:
##dbCur.execute(getIdAndPrioritySql,(uuid,))
##procid,pri = dbCur.fetchone()
##assert 1 == pri, 'Expected priority of 1 for %s, but got %s'%(uuid,pri)
##idCounts[procid] += 1
##dbCur.execute(getIdAndPrioritySql,(badUuid,))
##assert not dbCur.fetchone(), "Expect to get None entries in jobs table for badUuid"
##for id,expectCount in idCounts.items():
##dbCur.execute(getCountSql%('priority_jobs_%s'%id))
##seenCount = dbCur.fetchone()[0]
##assert expectCount == seenCount, 'Expected %s, got %s as count in priority_jobs_%s'%(expectCount,seenCount,id)
##finally:
##m.databaseConnectionPool.cleanup()
##def testPriorityJobsNotFound(self):
##"""
##testPriorityJobsNotFound(self):
##for each uuid, log an error and remove the uuid from the provided table
##"""
##global me
##m = monitor.Monitor(me.config)
##dbCon,dbCur = m.getDatabaseConnectionPair()
##try:
##dropBogusSql = "DROP TABLE IF EXISTS bogus;"
##createBogusSql = "CREATE TABLE bogus (uuid varchar(55));"
##insertBogusSql = "INSERT INTO bogus (uuid) VALUES ('NOPE'), ('NEVERMIND');"
##countSql = "SELECT count(*) from %s"
##dbCur.execute(dropBogusSql)
##dbCon.commit()
##dbCur.execute(createBogusSql)
##dbCon.commit()
##dbCur.execute(insertBogusSql)
##dbCon.commit()
##dbCur.execute(countSql%('bogus'))
##bogusCount0 = dbCur.fetchone()[0]
##assert 2 == bogusCount0
##self.markLog()
##m.priorityJobsNotFound(dbCur,['NOPE','NEVERMIND'])
##dbCur.execute(countSql%('bogus'))
##bogusCount1 = dbCur.fetchone()[0]
##assert 2 == bogusCount1, 'Expect uuids deleted, if any, from priorityjobs by default'
##m.priorityJobsNotFound(dbCur,['NOPE','NEVERMIND'], 'bogus')
##dbCur.execute(countSql%('bogus'))
##bogusCount2 = dbCur.fetchone()[0]
##assert 0 == bogusCount2, 'Expect uuids deleted from bogus when it is specified'
##self.markLog()
##dbCur.execute(dropBogusSql)
##dbCon.commit()
##finally:
##m.databaseConnectionPool.cleanup()
##neverCount = 0
##nopeCount = 0
##seg = self.extractLogSegment()
##for line in seg:
##if " - MainThread - priority uuid" in line:
##if 'NOPE was never found' in line: nopeCount += 1
##if 'NEVERMIND was never found' in line: neverCount += 1
##assert 2 == neverCount
##assert 2 == nopeCount
|
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
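A minimal usage sketch (assuming this module is available as the standard
library's `configparser'; the section and option names are illustrative):

    from configparser import ConfigParser

    parser = ConfigParser()
    parser.read_string("[server]\nhost = localhost\nport = 8080\n")
    parser.get('server', 'host')      # -> 'localhost'
    parser.getint('server', 'port')   # -> 8080
    parser.sections()                 # -> ['server']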
""" |
# Convert Font Awesome, Fork Awesome, Google Material Design, Material Design Icons, Kenney Game and Ionicons
# icon font parameters to C89, C++11 and C# compatible formats.
#
#------------------------------------------------------------------------------
# 1 - Source material
#
# 1.1 - Font Awesome
# 1.1.1 - version 4
# https://raw.githubusercontent.com/FortAwesome/Font-Awesome/fa-4/src/icons.yml
# https://github.com/FortAwesome/Font-Awesome/blob/fa-4/fonts/fontawesome-webfont.ttf
# 1.1.2 - version 5
# https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/advanced-options/metadata/icons.yml
# https://github.com/FortAwesome/Font-Awesome/blob/master/web-fonts-with-css/webfonts/fa-brands-400.ttf
# https://github.com/FortAwesome/Font-Awesome/blob/master/web-fonts-with-css/webfonts/fa-regular-400.ttf
# https://github.com/FortAwesome/Font-Awesome/blob/master/web-fonts-with-css/webfonts/fa-solid-900.ttf
# 1.2 - Fork Awesome
# https://raw.githubusercontent.com/ForkAwesome/Fork-Awesome/master/src/icons/icons.yml
# https://github.com/ForkAwesome/Fork-Awesome/blob/master/fonts/forkawesome-webfont.ttf
# 1.3 - Google Material Design
# https://raw.githubusercontent.com/google/material-design-icons/master/iconfont/codepoints
# https://github.com/google/material-design-icons/blob/master/iconfont/MaterialIcons-Regular.ttf
# 1.4 - Material Design Icons
# https://raw.githubusercontent.com/Templarian/MaterialDesign-Webfont/master/css/materialdesignicons.css
# https://github.com/Templarian/MaterialDesign-Webfont/blob/master/fonts/materialdesignicons-webfont.ttf
# 1.5 - Kenney Game icons
# https://raw.githubusercontent.com/nicodinh/kenney-icon-font/master/css/kenney-icons.css
# https://github.com/nicodinh/kenney-icon-font/blob/master/fonts/kenney-icon-font.ttf
# 1.6 - Ionicons
# https://raw.githubusercontent.com/ionic-team/ionicons/master/src/docs/archived/v2/css/ionicons.css
# https://github.com/ionic-team/ionicons/blob/master/src/docs/archived/v2/fonts/ionicons.ttf
#
#------------------------------------------------------------------------------
# 2 - Data sample
#
# Font Awesome example:
# - input: music:
# changes:
# - '1'
# - 5.0.0
# label: Music
# search:
# terms:
# - note
# - sound
# styles:
# - solid
# unicode: f001
# - output C++11: #define ICON_FA_MUSIC u8"\uf001"
# - output C89: #define ICON_FA_MUSIC "\xEF\x80\x81"
# - output C#: public const string Music = "\uf001";
#
# All fonts have computed min and max unicode codepoints ICON_MIN and ICON_MAX
# - output C89, C++11: #define ICON_MIN_FA 0xf000
# #define ICON_MAX_FA 0xf2e0
# - output C#: public const int IconMin = 0xf000;
# public const int IconMax = 0xf2e0;
#
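# A minimal sketch of that conversion (Python 2.7, per the dependencies
# below; the function name is illustrative, not part of the script):
#
# def codepoint_to_outputs(name, codepoint):
#     # codepoint is a hex string such as 'f001'
#     utf8 = unichr(int(codepoint, 16)).encode('utf-8')
#     c89_bytes = ''.join('\\x%02X' % ord(byte) for byte in utf8)
#     cpp11 = '#define ICON_FA_%s u8"\\u%s"' % (name.upper(), codepoint)
#     c89 = '#define ICON_FA_%s "%s"' % (name.upper(), c89_bytes)
#     return cpp11, c89
#
# codepoint_to_outputs('music', 'f001')
# # -> ('#define ICON_FA_MUSIC u8"\uf001"', '#define ICON_FA_MUSIC "\xEF\x80\x81"')
#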
#------------------------------------------------------------------------------
# 3 - Script dependencies
#
# 3.1 - Fonts source material online
# 3.2 - Python 2.7 - https://www.python.org/download/releases/2.7/
# 3.3 - Requests - http://docs.python-requests.org/
# 3.4 - PyYAML - http://pyyaml.org/
#
#------------------------------------------------------------------------------
# 4 - References
#
# GitHub repository: https://github.com/juliettef/IconFontCppHeaders/
#
#------------------------------------------------------------------------------
|
"""
Writing Plugins
---------------
nose supports plugins for test collection, selection, observation and
reporting. There are two basic rules for plugins:
* Plugin classes should subclass :class:`nose.plugins.Plugin`.
* Plugins may implement any of the methods described in the class
:doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
this class is for documentary purposes only; plugins may not subclass
IPluginInterface.
Hello World
===========
Here's a basic plugin. It doesn't do much so read on for more ideas or dive
into the :doc:`IPluginInterface <interface>` to see all available hooks.
.. code-block:: python
    import logging
    import os

    from nose.plugins import Plugin

    log = logging.getLogger('nose.plugins.helloworld')

    class HelloWorld(Plugin):
        name = 'helloworld'

        def options(self, parser, env=os.environ):
            super(HelloWorld, self).options(parser, env=env)

        def configure(self, options, conf):
            super(HelloWorld, self).configure(options, conf)
            if not self.enabled:
                return

        def finalize(self, result):
            log.info('Hello pluginized world!')
Registering
===========
.. Note::
Important note: the following applies only to the default
plugin manager. Other plugin managers may use different means to
locate and load plugins.
For nose to find a plugin, it must be part of a package that uses
setuptools_, and the plugin must be included in the entry points defined
in the setup.py for the package:
.. code-block:: python
    setup(name='Some plugin',
        # ...
        entry_points={
            'nose.plugins.0.10': [
                'someplugin = someplugin:SomePlugin'
            ]
        },
        # ...
    )
Once the package is installed with install or develop, nose will be able
to load the plugin.
.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
Registering a plugin without setuptools
=======================================
It is currently possible to register a plugin programmatically by
creating a custom nose runner like this:
.. code-block:: python
    import nose
    from yourplugin import YourPlugin

    if __name__ == '__main__':
        nose.main(addplugins=[YourPlugin()])
Defining options
================
All plugins must implement the methods ``options(self, parser, env)``
and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
that want the standard options should call the superclass methods.
nose uses optparse.OptionParser from the standard library to parse
arguments. A plugin's ``options()`` method receives a parser
instance. It's good form for a plugin to use that instance only to add
additional arguments that take only long arguments (--like-this). Most
of nose's built-in arguments get their default value from an environment
variable.
A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
object, as well as the current config object. Plugins should configure their
behavior based on the user-selected settings, and may raise exceptions
if the configured behavior is nonsensical.
Logging
=======
nose uses the logging classes from the standard library. To enable users
to view debug messages easily, plugins should use ``logging.getLogger()`` to
acquire a logger in the ``nose.plugins`` namespace.
Recipes
=======
* Writing a plugin that monitors or controls test result output
Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
results. If you also want to monitor output, implement
``setOutputStream`` and keep a reference to the output stream. If you
want to prevent the builtin ``TextTestResult`` output, implement
``setOutputStream`` and *return a dummy stream*. The default output will go
to the dummy stream, while you send your desired output to the real stream.
Example: `examples/html_plugin/htmlplug.py`_
* Writing a plugin that handles exceptions
Subclass :doc:`ErrorClassPlugin <errorclasses>`.
Examples: :doc:`nose.plugins.deprecated <deprecated>`,
:doc:`nose.plugins.skip <skip>`
* Writing a plugin that adds detail to error reports
Implement ``formatError`` and/or ``formatFailure``. The error tuple
you return (error class, error message, traceback) will replace the
original error tuple.
Examples: :doc:`nose.plugins.capture <capture>`,
:doc:`nose.plugins.failuredetail <failuredetail>`
* Writing a plugin that loads tests from files other than python modules
Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
return True for files that you want to examine for tests. In
``loadTestsFromFile``, for those files, return an iterable
containing TestCases (or yield them as you find them;
``loadTestsFromFile`` may also be a generator).
Example: :doc:`nose.plugins.doctests <doctests>`
* Writing a plugin that prints a report
Implement ``begin`` if you need to perform setup before testing
begins. Implement ``report`` and output your report to the provided stream.
Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
* Writing a plugin that selects or rejects tests
Implement any or all ``want*`` methods. Return False to reject the test
candidate, True to accept it -- which means that the test candidate
will pass through the rest of the system, so you must be prepared to
load tests from it if tests can't be loaded by the core loader or
another plugin -- and None if you don't care.
Examples: :doc:`nose.plugins.attrib <attrib>`,
:doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
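For instance, a minimal selector plugin might look like this (a sketch,
not one of the builtin plugins; the name and selection rule are
illustrative):
.. code-block:: python

    from nose.plugins import Plugin

    class SelectTodo(Plugin):
        """Select only test files whose names contain 'todo'."""
        name = 'select-todo'

        def wantFile(self, file):
            if 'todo' in file:
                return True
            return None  # no opinion: defer to other plugins and the core loader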
More Examples
=============
See any builtin plugin or example plugin in the examples_ directory in
the nose source distribution. There is a list of third-party plugins
`on jottit`_.
.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
.. _on jottit: http://nose-plugins.jottit.com/
""" |
"""
>>> from scipy import array, matrix
>>> from pybrain.auxiliary.pca import makeCentered
>>> data = array([[2.5, 2.4],
... [0.5, 0.7],
... [2.2, 2.9],
... [1.9, 2.2],
... [3.1, 3.0],
... [2.3, 2.7],
... [2.0, 1.6],
... [1.0, 1.1],
... [1.5, 1.6],
... [1.1, 0.9]])
>>> makeCentered(data)
array([[ 0.69, 0.49],
[-1.31, -1.21],
[ 0.39, 0.99],
[ 0.09, 0.29],
[ 1.29, 1.09],
[ 0.49, 0.79],
[ 0.19, -0.31],
[-0.81, -0.81],
[-0.31, -0.31],
[-0.71, -1.01]])
Tests for regular PCA
---------------------
>>> from pybrain.auxiliary.pca import pca, reduceDim
>>> pca(data, 1)
array([[-0.6778734 , -0.73517866]])
>>> reduceDim(data, 1)
matrix([[-0.82797019],
[ 1.77758033],
[-0.99219749],
[-0.27421042],
[-1.67580142],
[-0.9129491 ],
[ 0.09910944],
[ 1.14457216],
[ 0.43804614],
[ 1.22382056]])
>>> reduceDim(data, 2)
matrix([[-0.82797019, -0.17511531],
[ 1.77758033, 0.14285723],
[-0.99219749, 0.38437499],
[-0.27421042, 0.13041721],
[-1.67580142, -0.20949846],
[-0.9129491 , 0.17528244],
[ 0.09910944, -0.3498247 ],
[ 1.14457216, 0.04641726],
[ 0.43804614, 0.01776463],
[ 1.22382056, -0.16267529]])
>>> data2 = matrix([
... [2.4, 2.5],
... [0.7, 0.5],
... [2.9, 2.2],
... [2.2, 1.9],
... [3.0, 3.1],
... [2.7, 2.3],
... [1.6, 2.0],
... [1.1, 1.0],
... [1.6, 1.5],
... [0.9, 1.1]])
>>> reduceDim(data2, 2)
matrix([[ 0.17511531, 0.82797019],
[-0.14285723, -1.77758033],
[-0.38437499, 0.99219749],
[-0.13041721, 0.27421042],
[ 0.20949846, 1.67580142],
[-0.17528244, 0.9129491 ],
[ 0.3498247 , -0.09910944],
[-0.04641726, -1.14457216],
[-0.01776463, -0.43804614],
[ 0.16267529, -1.22382056]])
>>> data3 = matrix([
... [7.0, 4.0, 3.0],
... [4.0, 1.0, 8.0],
... [6.0, 3.0, 5.0],
... [8.0, 6.0, 1.0],
... [8.0, 5.0, 7.0],
... [7.0, 2.0, 9.0],
... [5.0, 3.0, 3.0],
... [9.0, 5.0, 8.0],
... [7.0, 4.0, 5.0],
... [8.0, 2.0, 2.0]])
>>> reduceDim(data3, 1)
matrix([[-2.15142276],
[ 3.80418259],
[ 0.15321328],
[-4.7065185 ],
[ 1.29375788],
[ 4.0993133 ],
[-1.62582148],
[ 2.11448986],
[-0.2348172 ],
[-2.74637697]])
Tests for probabilistic PCA
---------------------------
>>> from pybrain.auxiliary.pca import pPca
>>> pc = pPca(data, 1)
>>> x, y = pc[0, 0], pc[0, 1]
>>> x / y
0.92...
""" |
"""
This page is in the table of contents.
The xml.py script is an import translator plugin to get a carving from an xml file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
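A minimal shape for such a plugin follows (a sketch only; a real plugin
would build a carving object rather than return the parsed xml root):

    import xml.etree.ElementTree as ElementTree

    def getCarving(fileName=''):
        'Get the carving for the xml file.'
        tree = ElementTree.parse(fileName)
        return tree.getroot()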
An example of an xml boolean geometry format file follows below.
<?xml version='1.0' ?>
<fabmetheus version="2010-03-29">
<difference id="cube_cylinder_difference">
<matrix m14="-10.0" m24="20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</difference>
</fabmetheus>
In the 'fabmetheus' format, all class names are lower case. The defined geometric objects are cube, cylinder, difference, group, sphere, trianglemesh and union. The id attribute is not necessary. The default matrix is a four by four identity matrix. The attributes of the cube, cylinder and sphere default to one. The attributes of the vertexes in the triangle mesh default to zero. The boolean solids are difference, intersection and union. The difference solid is the first solid minus the remaining solids. The combined_shape.xml example in the xml_models folder in the models folder is pasted below.
<?xml version='1.0' ?>
<fabmetheus version="2010-03-29">
<difference id="cube_cylinder_difference">
<matrix m14="-10.0" m24="20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</difference>
<intersection id="cube_cylinder_intersection">
<matrix m14="-10.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</intersection>
<union id="cube_cylinder_union">
<matrix m14="-10.0" m24="-20.0" m34="5.0" />
<cube id="Cube 5" halfx="5.0" halfy="5.0" halfz="5.0">
</cube>
<cylinder id="Cylinder 5" height="10.0" radiusx="5.0" radiusy="5.0" topOverBottom="1.0">
<matrix m14="5.0" m24="-5.0" />
</cylinder>
</union>
<group id="sphere_tetrahedron_group">
<matrix m14="10.0" m24="-20.0" m34="5.0" />
<sphere id="Group Sphere 5" radiusx="5.0" radiusy="5.0" radiusz="5.0">
</sphere>
<trianglemesh id="Group Tetrahedron 5">
<matrix m14="15.0" />
<vertex x="-5.0" y="-5.0" z="-5.0" />
<vertex x="5.0" y="-5.0" z="-5.0" />
<vertex y="5.0" z="-5.0" />
<vertex z="5.0" />
<face vertex0="0" vertex1="2" vertex2="1" />
<face vertex0="3" vertex1="1" vertex2="2" />
<face vertex0="3" vertex1="2" vertex2="0" />
<face vertex0="3" vertex1="0" vertex2="1" />
</trianglemesh>
</group>
<sphere id="Sphere 5" radiusx="5.0" radiusy="5.0" radiusz="5.0">
<matrix m14="10.0" m34="5.0" />
</sphere>
<trianglemesh id="Tetrahedron 5">
<matrix m14="10.0" m24="20.0" m34="5.0" />
<vertex x="-5.0" y="-5.0" z="-5.0" />
<vertex x="5.0" y="-5.0" z="-5.0" />
<vertex y="5.0" z="-5.0" />
<vertex z="5.0" />
<face vertex0="0" vertex1="2" vertex2="1" />
<face vertex0="3" vertex1="1" vertex2="2" />
<face vertex0="3" vertex1="2" vertex2="0" />
<face vertex0="3" vertex1="0" vertex2="1" />
</trianglemesh>
</fabmetheus>
The 'fabmetheus' xml format is the preferred skeinforge format. When the Interpret button in the Interpret tool in Analyze is clicked, any xml format for which there is a plugin will be converted to the 'fabmetheus' format.
There is a plugin for the 'Art of Illusion' xml format. An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then picking the XML choice. This will bring up the XML file chooser window, choose a place to save the file then click "OK". Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported, the artofillusion plugin will ignore the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of Illusion scene and they will all be carved, then fabricated together.
""" |
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can both iterate over it and even change it without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
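For example, a sketch (the namespace and key names are illustrative)::

    import time
    from cherrypy.lib import cpstats

    cpstats.StatsPage.formatting['My Stuff'] = {
        'Start Time': lambda v: time.ctime(v),  # callable formatter
        'Events/Second': '%.3f',                # format string
        'Enabled': None,                        # suppress this column
    }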
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
""" |
# # ===============================================================================
# # Copyright 2013 NAME #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from __future__ import absolute_import
# from envisage.ui.tasks.task_factory import TaskFactory
#
# from pychron.database.isotope_database_manager import IsotopeDatabaseManager
# from pychron.database.tasks.connection_preferences import ConnectionPreferencesPane
# from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
# from pychron.mass_spec.tasks.preferences import MassSpecConnectionPane
#
#
# class DatabasePlugin(BaseTaskPlugin):
# id = 'pychron.database'
# name = 'Database'
# _connectable = False
# _db = None
#
# test_pychron_description = 'Test the connection to the Pychron Database'
# test_massspec_description = 'Test the connection to the MassSpec Database'
# test_pychron_version_description = 'Test compatibility of Pychron with the current Database'
#
# # test_pychron_error = ''
# # test_massspec_error = ''
# # test_pychron_version_error = ''
#
# def stop(self):
# from pychron.globals import globalv
#
# kind = globalv.prev_db_kind
# if kind:
# man = self._get_database_manager(connect=False)
# man.db.kind = globalv.prev_db_kind
#
# def start(self):
# self.startup_test()
# if self._connectable:
# self._db.populate_default_tables()
# del self._db
#
# def test_pychron_version(self):
# iso = self._get_database_manager()
# result, err = False, ''
# try:
# result = iso.db.test_version()
# except TypeError:
# err = 'Not connected'
#
# return result, err
# # if err:
# # self.test_pychron_version_error = err
# #
# # return 'Passed' if not err else 'Failed'
#
# def test_pychron(self):
# iso = self._get_database_manager()
# self._connectable = result = iso.is_connected()
#
# return result, iso.db.connection_error
# # if not c:
# # self.test_pychron_error = iso.db.connection_error
#
# # return 'Passed' if c else 'Failed'
#
# def test_massspec(self):
# ret, err = 'Skipped', ''
# db = self.application.get_service(
# 'pychron.mass_spec.database.massspec_database_adapter.MassSpecDatabaseAdapter')
# if db:
# db.bind_preferences()
# connected = db.connect(warn=False)
# ret = 'Passed'
# if not connected:
# err = db.connection_error
# # self.test_massspec_error = db.connection_error
# ret = 'Failed'
# return ret, err
#
# # private
# def _get_database_manager(self, connect=True):
# if not self._db:
# iso = IsotopeDatabaseManager(application=self.application,
# warn=False,
# version_warn=False,
# attribute_warn=False,
# connect=connect)
# self._db = iso
#
# return self._db
#
# def _get_pref(self, name):
# prefs = self.application.preferences
# return prefs.get('pychron.massspec.database.{}'.format(name))
#
# def _slave_factory(self):
# from pychron.database.tasks.replication_task import ReplicationTask
#
# s = ReplicationTask()
# return s
#
# def _tasks_default(self):
# return [TaskFactory(id='pychron.slave',
# name='Replication',
# factory=self._slave_factory)]
#
# def _preferences_panes_default(self):
# return [ConnectionPreferencesPane,
# MassSpecConnectionPane]
#
# def _service_offers_default(self):
# sos = [self.service_offer_factory(
# protocol=IsotopeDatabaseManager,
# factory=IsotopeDatabaseManager)]
#
# if self._get_pref('enabled'):
# from pychron.mass_spec.database.massspec_database_adapter import MassSpecDatabaseAdapter
#
# sos.append(self.service_offer_factory(
# protocol=MassSpecDatabaseAdapter,
# factory=MassSpecDatabaseAdapter))
# # name = self._get_pref('name')
# # host = self._get_pref('host')
# # password = self._get_pref('password')
# # username = self._get_pref('username')
# # db = MassSpecDatabaseAdapter(name=name,
# # host=host,
# # password=password,
# # username=username)
# #
#
# return sos
#
# # ============= EOF =============================================
|
"""
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaN's with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
numpy.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze Return an array with length-one dimensions removed.
atleast_1d Force arrays to be > 1D
atleast_2d Force arrays to be > 2D
atleast_3d Force arrays to be > 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
stack Stack arrays along a new axis
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
1D Array Set Operations
-----------------------
Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
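A short sketch of a few of these routines (names as in numpy's public
namespace)::

    import numpy as np

    a = np.array([1, 2, 3, 4])
    b = np.array([3, 4, 5])
    np.intersect1d(a, b)  # array([3, 4])
    np.setdiff1d(a, b)    # array([1, 2])
    np.union1d(a, b)      # array([1, 2, 3, 4, 5])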
""" |
"""Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
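A minimal source-side sketch (Python 2 module names, matching this module;
the widget and class are illustrative):

    import Tkinter
    import Tkdnd

    class DraggableLabel:
        def __init__(self, parent, text):
            self.label = Tkinter.Label(parent, text=text, relief='raised')
            self.label.pack()
            self.label.bind('<ButtonPress>', self.press)
        def press(self, event):
            # Hand ourselves to the dnd machinery; the returned instance
            # need not be stored (see above).
            Tkdnd.dnd_start(self, event)
        def dnd_end(self, target, event):
            pass  # called when the drag-and-drop process is over (see below)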
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. because different button callbacks all
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
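A hypothetical target widget could thus look as follows (assuming
'import Tkinter'; the method bodies are illustrative, and any widget
class will do):

    class Target(Tkinter.Canvas):
        def dnd_accept(self, source, event):
            return self                    # we are the target object
        def dnd_enter(self, source, event):
            pass                           # e.g. highlight the drop zone
        def dnd_motion(self, source, event):
            pass                           # e.g. move a drop indicator
        def dnd_leave(self, source, event):
            pass                           # e.g. remove the highlight
        def dnd_commit(self, source, event):
            self.dnd_leave(source, event)  # commit handled in dnd_end()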
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
""" |
"""
=================
Structured Arrays
=================
Introduction
============
Numpy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2, 3.0, 'World')
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g. H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
Using strings to define the record structure precludes naming the
fields in the original definition. The names can be changed as shown
later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, each row must be a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
""" |
"""
********
Overview
********
The ``synapseclient`` package provides an interface to
`Synapse <http://www.synapse.org>`_, a collaborative
workspace for reproducible data intensive research projects,
providing support for:
- integrated presentation of data, code and text
- fine grained access control
- provenance_ tracking
The ``synapseclient`` package lets you communicate with the cloud-hosted
Synapse service to access data and create shared data analysis projects from
within Python scripts or at the interactive Python console. Other Synapse clients
exist for `R <https://www.synapse.org/#!Synapse:syn1834618>`_,
`Java <https://github.com/Sage-Bionetworks/Synapse-Repository-Services/tree/develop/client/synapseJavaClient>`_,
and the `web <https://www.synapse.org/>`_. The Python client can also be used from the
`command line <CommandLineClient.html>`_.
If you're just getting started with Synapse,
have a look at the Getting Started guides for `Synapse <http://docs.synapse.org/articles/getting_started.html>`_
and `the Python client <http://docs.synapse.org/python/>`_.
Good example projects are:
- `TCGA Pan-cancer (syn300013) <https://www.synapse.org/#!Synapse:syn300013>`_
- `Development of a Prognostic Model for Breast Cancer Survival in an Open Challenge Environment (syn1721874) <https://www.synapse.org/#!Synapse:syn1721874>`_
- `Demo projects (syn1899339) <https://www.synapse.org/#!Synapse:syn1899339>`_
Installation
============
The `synapseclient <https://pypi.python.org/pypi/synapseclient/>`_ package is available from PyPI. It can
be installed or upgraded with pip::
(sudo) pip install (--upgrade) synapseclient[pandas,pysftp]
The dependencies on pandas and pysftp are optional. The Synapse :py:mod:`synapseclient.table`
feature integrates with Pandas. Support for sftp is required for users of SFTP file storage.
Both require native libraries to be compiled or installed separately from prebuilt binaries.
Source code and development versions are `available on Github <https://github.com/Sage-Bionetworks/synapsePythonClient>`_.
Installing from source::
git clone git://github.com/Sage-Bionetworks/synapsePythonClient.git
cd synapsePythonClient
You can stay on the master branch to get the latest stable release or check out the develop branch or a tagged revision::
git checkout <branch or tag>
Next, either install the package into the site-packages directory with ``python setup.py install``, or use ``python setup.py develop`` to make the installation track the repository head without having to reinstall::
python setup.py <install or develop>
Connecting to Synapse
=====================
To use Synapse, you'll need to
`register <https://www.synapse.org/#!RegisterAccount:0>`_
for an account. The Synapse website can authenticate using a Google account,
but you'll need to take the extra step of creating a Synapse password
to use the programmatic clients.
Once that's done, you'll be able to load the library, create a :py:class:`Synapse` object and login::
import synapseclient
syn = synapseclient.Synapse()
syn.login('EMAIL', 'secret')
For more information, see:
- :py:class:`Synapse`
- :py:func:`Synapse.login`
- :py:func:`Synapse.logout`
Imports
=======
Several components of the synapseclient can be imported as needed::
from synapseclient import Activity
from synapseclient import Entity, Project, Folder, File, Link
from synapseclient import Evaluation, Submission, SubmissionStatus
from synapseclient import Wiki
Accessing Data
==============
Synapse identifiers are used to refer to projects and data which are represented by
:py:mod:`synapseclient.entity` objects. For
example, the entity `syn1899498 <https://www.synapse.org/#!Synapse:syn1899498>`_
represents a tab-delimited file containing a 100 by 4 matrix. Getting the
entity retrieves an object that holds metadata describing the matrix,
and also downloads the file to a local cache::
entity = syn.get('syn1899498')
View the entity's metadata in the Python console::
print(entity)
This is one simple way to read in a small matrix::
rows = []
with open(entity.path) as f:
    header = f.readline().split('\\t')
    for line in f:
        row = [float(x) for x in line.split('\\t')]
        rows.append(row)
View the entity in the browser::
syn.onweb('syn1899498')
- :py:class:`synapseclient.entity.Entity`
- :py:func:`synapseclient.Synapse.get`
- :py:func:`synapseclient.Synapse.onweb`
Organizing data in a Project
============================
You can create your own projects and upload your own data sets. Synapse stores
entities in a hierarchical or tree structure. Projects are at the top level and
must be uniquely named::
import synapseclient
from synapseclient import Project, Folder, File, Link
project = Project('My uniquely named project')
project = syn.store(project)
Creating a folder::
data_folder = Folder('Data', parent=project)
data_folder = syn.store(data_folder)
Adding files to the project::
test_entity = File('/path/to/data/file.xyz', description='Fancy new data', parent=data_folder)
test_entity = syn.store(test_entity)
In addition to simple data storage, Synapse entities can be `annotated <#annotating-synapse-entities>`_ with
key/value metadata, described in markdown documents (wikis_), and linked
together in provenance_ graphs to create a reproducible record of a data
analysis pipeline.
See also:
- :py:class:`synapseclient.entity.Entity`
- :py:class:`synapseclient.entity.Project`
- :py:class:`synapseclient.entity.Folder`
- :py:class:`synapseclient.entity.File`
- :py:class:`synapseclient.entity.Link`
- :py:func:`synapseclient.Synapse.store`
Annotating Synapse entities
===========================
Annotations are arbitrary metadata attached to Synapse entities, for example::
test_entity.genome_assembly = "hg19"
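Changed annotations are saved back to Synapse by storing the entity again,
for example::

    test_entity = syn.store(test_entity)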
See:
- :py:mod:`synapseclient.annotations`
Provenance
==========
Synapse provides tools for tracking 'provenance', or the transformation of raw data
into processed results, by linking derived data objects to source data and the
code used to perform the transformation.
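For example (a sketch; the entity ids and names are illustrative)::

    activity = Activity(name='clustering',
                        description='whizzy clustering',
                        used=['syn1234', 'syn1235'],
                        executed='syn4567')
    test_entity = syn.store(test_entity, activity=activity)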
See:
- :py:class:`synapseclient.activity.Activity`
Tables
======
Tables can be built up by adding sets of rows that follow a user-defined schema
and queried using a SQL-like syntax.
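For example, a small table might be created like so (a sketch; the schema,
column and row values are illustrative)::

    from synapseclient import Schema, Column, Table
    cols = [Column(name='Name', columnType='STRING', maximumSize=20),
            Column(name='Score', columnType='DOUBLE')]
    schema = syn.store(Schema(name='My Scores', columns=cols, parent=project))
    table = syn.store(Table(schema, [['Alice', 1.5], ['Bob', 2.0]]))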
See:
- :py:mod:`synapseclient.table`
- :py:class:`synapseclient.table.Schema`
- :py:class:`synapseclient.table.Column`
- :py:func:`synapseclient.Synapse.getColumns`
- :py:func:`synapseclient.Synapse.getTableColumns`
Wikis
=====
Wiki pages can be attached to a Synapse entity (i.e. project, folder, file, etc.).
Text and graphics can be composed in markdown and rendered in the web view of
the object.
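For example (a sketch)::

    wiki = Wiki(title='Analysis summary', owner=project,
                markdown='Results are *encouraging*.')
    wiki = syn.store(wiki)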
See:
- :py:func:`synapseclient.Synapse.getWiki`
- :py:class:`synapseclient.wiki.Wiki`
Evaluations
===========
An evaluation is a Synapse construct useful for building processing pipelines and
for scoring predictive modelling and data analysis challenges.
See:
- :py:mod:`synapseclient.evaluation`
- :py:func:`synapseclient.Synapse.getEvaluation`
- :py:func:`synapseclient.Synapse.submit`
- :py:func:`synapseclient.Synapse.getSubmissions`
- :py:func:`synapseclient.Synapse.getSubmission`
- :py:func:`synapseclient.Synapse.getSubmissionStatus`
Querying
========
Synapse supports a `SQL-like query language <https://sagebionetworks.jira.com/wiki/display/PLFM/Repository+Service+API#RepositoryServiceAPI-QueryAPI>`_::
results = syn.query('SELECT id, name FROM entity WHERE parentId=="syn1899495"')
for result in results['results']:
    print(result['entity.id'], result['entity.name'])
Querying for projects owned by the current user::
profile = syn.getUserProfile()
results = syn.query('SELECT id, name FROM project WHERE project.createdByPrincipalId==%s' % profile['ownerId'])
for result in results['results']:
    print(result['project.id'], result['project.name'])
See:
- :py:func:`synapseclient.Synapse.query`
- :py:func:`synapseclient.Synapse.chunkedQuery`
Access control
==============
By default, data sets in Synapse are private to your user account, but they can
easily be shared with specific users, groups, or the public.
TODO: finish this once there is a reasonable way to find principalIds.
See:
- :py:func:`Synapse.getPermissions`
- :py:func:`Synapse.setPermissions`
Accessing the API directly
==========================
These methods enable access to the Synapse REST(ish) API taking care of details
like endpoints and authentication. See the
`REST API documentation <http://docs.synapse.org/rest/>`_.
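For example (a sketch; see the REST API documentation for the available
endpoints)::

    profile = syn.restGET('/userProfile')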
See:
- :py:func:`synapseclient.Synapse.restGET`
- :py:func:`synapseclient.Synapse.restPOST`
- :py:func:`synapseclient.Synapse.restPUT`
- :py:func:`synapseclient.Synapse.restDELETE`
Synapse utilities
================
There is a companion module called synapseutils that provides higher
level functionality such as recursive copying of content, syncing with
Synapse and additional query functionality.
See:
- :py:mod:`synapseutils`
More information
================
For more information see the
`Synapse User Guide <http://docs.synapse.org/articles/>`_. These
API docs are browsable online at
`http://docs.synapse.org/python/ <http://docs.synapse.org/python/>`_.
Getting updates
===============
To get information about new versions of the client including development versions
see `synapseclient.check_for_updates() <Versions.html#synapseclient.version_check.check_for_updates>`_ and `synapseclient.release_notes() <Versions.html#synapseclient.version_check.release_notes>`_.
""" |
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
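For example, a nested list of tuples is array_like::
>>> np.asarray([(1, 2), (3, 4)]).shape
(2, 2)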
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
...     def new_logging_func(*args, **kwargs):
...         print "Logging call with parameters:", args, kwargs
...         return f(*args, **kwargs)
...
...     return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
...     return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
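For example::
>>> np.array([[1, 2], [3, 4]]).flatten()
array([1, 2, 3, 4])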
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
...     wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
...     print "Key %d: %s" % (n, k)
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each of which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
An array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An `ndarray`_ with `structured data type`_ which has been subclassed as
np.recarray and whose dtype is of type np.record, making the
fields of its data type accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New Numpy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
...     color = 'blue'
...
...     def paint(self):
...         print "Painting the city %s!" % self.color
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each of which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
structured data type
A data type composed of other data types, organized as named fields. For example::
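>>> np.dtype([('name', '|S10'), ('age', '<i4')])
dtype([('name', '|S10'), ('age', '<i4')])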
tuple
A sequence that may contain a variable number of elements of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
...     return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
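For example::
>>> np.add([1, 2], 3)
array([4, 5])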
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
the high and low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
""" |
"""
ROTABOX kivy 1.10.0
===================
Rotabox is a *kivy widget* with customizable 2D bounds that follow its rotation.
The users can shape their own, specific bounds, to fit an image (or a series of
images in an animation), using a visual editor (See Rotaboxer below).
Rotabox also offers multitouch interactivity (drag, rotation and scaling).
==========================
Features & particularities
Collision detection methods:
Rotabox offers two collision approaches.
They can't both be used at the same time on the same widget and, normally,
collisions are expected to happen between widgets that use the same detection
method.
Combinations between the two are possible but rather more expensive.
* Segment intersection detection (Default option):
(See 'Introduction to Algorithms 3rd Edition', ch.33 Computational Geometry
(https://mitpress.mit.edu/books/introduction-algorithms)
and 'Line Segment Intersection' lecture notes by NAME
(http://jeffe.cs.illinois.edu/teaching/373/notes/x06-sweepline.pdf))
* Supports open-shaped bounds, down to just a single line segment.
* Interacts with Rotaboxes that use either detection method
(more expensive if method is different) and regular widgets.
* In a positive check against a Rotabox of the same method, instead of
*True*, both the intersected sides' indices and their respective
polygons' indices are returned, in the form of:
[(this_pol_i, this_side_i), (that_pol_i, that_side_i)].
* Point membership in polygon detection:
(See 'Even-odd rule' (https://en.wikipedia.org/wiki/Even%E2%80%93odd_rule))
* It can be less expensive when dealing with complex shapes (more than 15
segments), as it can benefit from breaking these shapes into more simple
ones when making the bounds in the editor.
* Requires mutual collision checks (All involved widgets should check for
an accurate reading).
* Interacts with Rotaboxes that use the same detection method and regular
widgets (but behaving, itself, like a regular widget while doing so).
* In a positive check against a Rotabox of the same method, instead of
*True*, the checker's collided polygon's index is returned, wrapped in a
tuple `(i,)` so that it always evaluates to True.
Open collision bounds (Segment method only)
If a polygon is open, the segment between the last and first points of the
polygon is not considered in the collision checks.
Since the segment collision method is only concerned with the polygon's
sides, a widget can 'enter' an open polygon, passing through the opening,
and then hit the back wall from inside, for example.
Note that *collide_point* doesn't work for an open polygon (i.e. an open
polygon cannot be touched accurately).
Visual point tracking
Since a rotating widget doesn't really rotate, its points lose their
reference to its visual (Positional properties like [top] or [center] don't
rotate).
Rotabox can track any of its own points while rotating, provided that they
are predefined (Hence, the custom bounds' ability).
They then can be accessed using their indices.
This can be useful, for example, in changing the point of rotation to a
predefined point on the widget while the latter is rotating.
Touch interactivity
Since, due to the differences between the Scatter and Rotabox concepts, a
way to combine the two couldn't be found, Rotabox uses the Scatter widget's
code, modified to act on the actual size and position of the widget and
child (essential for accurate collision detection).
It supports single and multitouch drag, rotation and scaling (the latter two
use the *origin* property in the singletouch option).
Cython option
Rotabox tries by default to use a compiled cython module (cybounds.so or
cybounds.pyd) for a roughly 5x speedup.
Users need to compile it for their specific system using the provided
cybounds.c file.
Restrictions
* In order to be able to maintain any arbitrary aspect ratio (e.g. its image's
ratio), Rotabox can't use the *size_hint* property.
Try using *size* property in a relative manner instead
(e.g. `self.width = self.parent.width * .5`).
* Rotabox can only have one child. It can be an *Image* but not necessarily.
Grandchildren, however, can collide independently, only if the widget is not
rotated ( *angle* must be *0* ).
===
API
Basic Usage
To use Rotabox, just include *rotabox_full.py* in your project files.
from rotabox import Rotabox
...
rb = Rotabox()
rb.add_widget(Image(source='img.png'))
self.add_widget(rb)
The instance's default bounding box will be a rectangle, the size of the image,
that rotates with it.
Use *angle* and *origin* properties for rotation.
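For example, spinning the widget with Kivy's Clock (a sketch; the schedule
rate is arbitrary):

    from kivy.clock import Clock

    def spin(dt):
        rb.angle += 1

    Clock.schedule_interval(spin, 1 / 30.)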
_________
Interface
**angle** *NumericProperty* (0):
The angle of rotation in degrees.
**origin** *AliasProperty* *tuple* (center):
Sets the point of rotation. Default position is the widget's center.
**image** *ObjectProperty*:
Rotabox's only child will most likely be an *Image*.
If not so, Rotabox will attempt to find the topmost *Image* in its tree and
assign it to this property.
Otherwise, the user can specify an *image* somewhere in the widget's tree,
that the custom bounds will use as a reference.
An .atlas spritesheet can also be used as an animation source and different
bounds can be defined for each frame.
**aspect_ratio** *NumericProperty* (0.)
If not provided, *image*'s ratio is going to be used.
_______________________________
Customizing the Collidable Area
**Rotaboxer** Visual editor.
A convenient way to define the *custom_bounds* of a Rotabox widget.
To use it, run *rotaboxer.py* directly. It can be found in the
*Visual Editor* folder, at the repository.
Open a *.png* image or an *.atlas* file in the editor, draw bounds for it
and export the resulting code to clipboard, to use in a Rotabox widget.
**custom_bounds** *ObjectProperty* (`[[(0, 0), (1, 0), (1, 1), (0, 1)]]`)
This is where the custom bounds are being defined.
It's also the output of the Rotaboxer tool (above).
It can be a *list* of one or more polygons' data as seen in its default
value, above.
Each polygon's data is a *list* of point tuples `(x, y)`.
Points' values should be expressed as percentages of the widget's *width*
and *height*, where `(0, 0)` is widget's `(x, y)`, `(1, 1)` is widget's
`(right, top)` and `(.5, .5)` is widget's *center*.
Here's another example with more polygons:
self.bounds = [[(0.013, 0.985), (0.022, 0.349),
(0.213, 0.028), (0.217, 0.681)],
[(0.267, 0.346), (0.483, -0.005),
(0.691, 0.316), (0.261, 0.975)],
[(0.539, 0.674), (0.73, 0.37),
(0.983, 0.758)]]
*custom_bounds* can also be a *dictionary*, in case of animated bounds
(different bounds for different frames of an animation sequence in an
*.atlas* file), where the *keys* correspond to the frame names in the
*.atlas* file and each *item* is a *list* of one or more polygons' data
like the above.
Here's an example of such a *dictionary*:
self.bounds = {'00': [[(0.201, 0.803), (0.092, 0.491),
(0.219, 0.184), (0.526, 0.064)],
[(0.419, 0.095), (0.595, 0.088),
(0.644, 0.493)]],
'01': [[(0.357, 0.902), (0.17, 0.65),
(0.184, 0.337), (0.343, 0.095),
(0.644, 0.098)]],
'02': [[(...
...
... etc ]]}
**segment_mode** *BooleanProperty* (True):
Toggle between the two collision detection methods *(See Features above)*.
**open_bounds** *ListProperty*:
If a polygon's index is in this list, the segment between the last and first
points of the polygon is not considered in the collision checks
(segment_mode only).
**pre_check** *BooleanProperty* (False):
A collision optimization switch for larger widgets in Cython.
For small widgets (under 45 points), the tax of extra calculations
outweighs any benefit in collision.
_______________
Touch interface
Most of it is familiar from the Scatter widget.
**touched_to_front** *BooleanProperty* (False)
If touched, the widget will be pushed to the top of the parent widget tree.
**collide_after_children** *BooleanProperty* (True)
If True, limiting the touch inside the bounds will be done after dispatching
the touch to the child and grandchildren, so even outside the bounds they
can still be touched.
*IMPORTANT NOTE: Grandchildren, inside or outside the bounds, can collide
independently ONLY if widget is NOT ROTATED ( *angle* must be *0* ).*
Single touch definitions:
**single_drag_touch** *BoundedNumericProperty* (1, min=1)
How many touches will be treated as one single drag touch.
**single_trans_touch** *BoundedNumericProperty* (1, min=1)
How many touches will be treated as one single transformation touch.
Single touch operations:
**allow_drag_x** *BooleanProperty* (False)
**allow_drag_y** *BooleanProperty* (False)
**allow_drag** *AliasProperty*
**single_touch_rotation** *BooleanProperty* (False)
Rotate around *origin*.
**single_touch_scaling** *BooleanProperty* (False)
Scale around *origin*.
Multitouch rotation/scaling:
**multi_touch_rotation** *BooleanProperty* (False)
**multi_touch_scaling** *BooleanProperty* (False)
_________________
Utility interface
**pivot** *ReferenceListProperty*
The point of rotation and scaling.
While *origin* property sets *pivot*'s position, relatively to widget's
*size* and *pos*, *pivot* itself can be used to position the widget, much
like *pos* or *center*.
**get_point(pol_index, point_index)** *Method*
Returns the current position of a certain point.
The argument indices are based on user's [custom_bounds]' structure.
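For example, the current position of the first point of the first polygon
(a sketch):

    corner = rb.get_point(0, 0)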
**read_bounds(filename)** *Method*
Define [custom_bounds] using a rotaboxer's project file (.bounds file).
To work, [size] should be already defined.
**draw_bounds** *NumericProperty* (0)
This option can be useful during testing, as it makes the widget's bounds
visible. (1 for bounds, 2 for bounds & bounding boxes)
**scale** *AliasProperty*
Current widget's scale.
**scale_min** *NumericProperty* (0.01)
**scale_max** *NumericProperty* (1e20)
Optional scale restrictions.
**ready** *BooleanProperty* (False)
Signifies the completion of the widget's initial preparations.
Useful to read in cases where the widget is stationary.
Also, its state changes to False when a size change or reset is triggered
and back to True after said size change or reset.
**prepared** *BooleanProperty* (False)
Its state change signifies a reset.
The reset completion signal, however, is the consequent [ready] state change
to True.
___________________________________________________________________________
A Rotabox example can be seen if this module is run directly.
""" |
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
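For example, a minimal reading sketch (the filename is illustrative):

    import aifc
    f = aifc.open('sound.aiff', 'r')
    try:
        nchannels = f.getnchannels()
        data = f.readframes(f.getnframes())   # raw sample bytes
    finally:
        f.close()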
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, with the possible exception of the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
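A minimal writing sketch (the parameter values are illustrative; 'data'
holds raw sample bytes, e.g. as read above):

    import aifc
    g = aifc.open('out.aiff', 'w')   # '.aiff' extension -> AIFF file
    g.setnchannels(1)
    g.setsampwidth(2)                # 16-bit samples
    g.setframerate(44100)
    g.writeframes(data)              # writes frames and patches the header
    g.close()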
""" |
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z USERNAME $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl   fixed typo in ElementTree.findtext (Thomas NAME)
# 2003-09-04 fl   fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by NAME All rights reserved.
#
# EMAIL
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
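For example, a minimal TCP echo service (a sketch; the module is named
SocketServer on Python 2 and socketserver on Python 3):

    import SocketServer

    class EchoHandler(SocketServer.StreamRequestHandler):
        def handle(self):
            # rfile/wfile are file-like objects wrapping the socket
            for line in self.rfile:
                self.wfile.write(line)

    server = SocketServer.TCPServer(('localhost', 9999), EchoHandler)
    server.serve_forever()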
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 NAME <EMAIL>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
""" |
"""
=====================================
Sparse matrices (:mod:`scipy.sparse`)
=====================================
.. currentmodule:: scipy.sparse
SciPy 2-D sparse matrix package for numeric data.
Contents
========
Sparse matrix classes
---------------------
.. autosummary::
:toctree: generated/
bsr_matrix - Block Sparse Row matrix
coo_matrix - A sparse matrix in COOrdinate format
csc_matrix - Compressed Sparse Column matrix
csr_matrix - Compressed Sparse Row matrix
dia_matrix - Sparse matrix with DIAgonal storage
dok_matrix - Dictionary Of Keys based sparse matrix
lil_matrix - Row-based linked list sparse matrix
Functions
---------
Building sparse matrices:
.. autosummary::
:toctree: generated/
eye - Sparse MxN matrix whose k-th diagonal is all ones
identity - Identity matrix in sparse format
kron - kronecker product of two sparse matrices
kronsum - kronecker sum of sparse matrices
diags - Return a sparse matrix from diagonals
spdiags - Return a sparse matrix from diagonals
block_diag - Build a block diagonal sparse matrix
tril - Lower triangular portion of a matrix in sparse format
triu - Upper triangular portion of a matrix in sparse format
bmat - Build a sparse matrix from sparse sub-blocks
hstack - Stack sparse matrices horizontally (column wise)
vstack - Stack sparse matrices vertically (row wise)
rand - Random values in a given shape
norm - Return norm of a sparse matrix
Sparse matrix tools:
.. autosummary::
:toctree: generated/
find
Identifying sparse matrices:
.. autosummary::
:toctree: generated/
issparse
isspmatrix
isspmatrix_csc
isspmatrix_csr
isspmatrix_bsr
isspmatrix_lil
isspmatrix_dok
isspmatrix_coo
isspmatrix_dia
Submodules
----------
.. autosummary::
:toctree: generated/
csgraph - Compressed sparse graph routines
linalg - sparse linear algebra routines
Exceptions
----------
.. autosummary::
:toctree: generated/
SparseEfficiencyWarning
SparseWarning
Usage information
=================
There are seven available sparse matrix types:
1. csc_matrix: Compressed Sparse Column format
2. csr_matrix: Compressed Sparse Row format
3. bsr_matrix: Block Sparse Row format
4. lil_matrix: List of Lists format
5. dok_matrix: Dictionary of Keys format
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
7. dia_matrix: DIAgonal format
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
The lil_matrix class supports basic slicing and fancy
indexing with a similar syntax to NumPy arrays. As illustrated below,
the COO format may also be used to efficiently construct matrices.
To perform manipulations such as multiplication or inversion, first
convert the matrix to either CSC or CSR format. The lil_matrix format is
row-based, so conversion to CSR is efficient, whereas conversion to CSC
is less so.
All conversions among the CSR, CSC, and COO formats are efficient,
linear-time operations.
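For instance, round-tripping a small matrix through the three formats
(a minimal illustration):

>>> from scipy.sparse import coo_matrix
>>> C = coo_matrix(([1, 2], ([0, 1], [1, 0])), shape=(2, 2))
>>> C.tocsr().tocsc().tocoo().toarray()
array([[0, 1],
       [2, 0]])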
Matrix vector product
---------------------
To do a vector product between a sparse matrix and a vector, simply use
the matrix `dot` method, as described in its docstring:
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
therefore using it will produce unexpected results or errors.
The corresponding dense array should be obtained first instead:
>>> np.dot(A.toarray(), v)
array([ 1, -3, -1], dtype=int64)
but then all the performance advantages would be lost.
The CSR format is especially suitable for fast matrix vector products.
Example 1
---------
Construct a 1000x1000 lil_matrix and add some values to it:
>>> from scipy.sparse import lil_matrix
>>> from scipy.sparse.linalg import spsolve
>>> from numpy.linalg import solve, norm
>>> from numpy.random import rand
>>> A = lil_matrix((1000, 1000))
>>> A[0, :100] = rand(100)
>>> A[1, 100:200] = A[0, :100]
>>> A.setdiag(rand(1000))
Now convert it to CSR format and solve A x = b for x:
>>> A = A.tocsr()
>>> b = rand(1000)
>>> x = spsolve(A, b)
Convert it to a dense matrix and solve, and check that the result
is the same:
>>> x_ = solve(A.toarray(), b)
Now we can compute the norm of the error with:
>>> err = norm(x-x_)
>>> err < 1e-10
True
It should be small :)
Example 2
---------
Construct a matrix in COO format:
>>> from scipy import sparse
>>> from numpy import array
>>> I = array([0,3,1,0])
>>> J = array([0,3,1,2])
>>> V = array([4,5,7,9])
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
Notice that the indices do not need to be sorted.
Duplicate (i,j) entries are summed when converting to CSR or CSC.
>>> I = array([0,0,1,3,1,0,0])
>>> J = array([0,2,1,3,1,0,0])
>>> V = array([1,1,1,1,1,1,1])
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
This is useful for constructing finite-element stiffness and mass matrices.
Further Details
---------------
CSR column indices are not necessarily sorted. Likewise for CSC row
indices. Use the .sorted_indices() and .sort_indices() methods when
sorted indices are required (e.g. when passing data to other libraries).
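For example (a minimal illustration):

>>> from scipy.sparse import csr_matrix
>>> m = csr_matrix([[0, 0, 1], [2, 0, 0]])
>>> m.sort_indices()        # sort the index arrays in place
>>> s = m.sorted_indices()  # or get a sorted copy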
""" |
#-------------------------------------------------------------------------------
# Name: AB_Class.py
# Purpose: Classes for methods to be called for accessing the
# SQLite3 database built in AB_GTFStoSQL.py, visualising it, and
# performing analysis on it.
# Various aspects of the GTFS are constructed into Objects that
# can be easily manipulated and extended.
#
# Classes and completed methods:
# Database(Object) ::A GTFS feed transformed into a SQLite database::
# > __init__(database) ::<database> is a SQLite3 database, constructed by the use of AB_GTFStoSQL.py::
# > getFeedInfo() ::Returns cur.fetchall() of the feed_info table::
# > feedEndDate() ::Returns a datetime object representing the end date of the GTFS feed::
# > feedStartDate() ::Returns a datetime object representing the start date of the GTFS feed::
# > feedDateRange() ::Returns a tuple of two datetime objects, representing [0] the start date of the feed and [1] the end date of the feed::
# > getAllModes() ::Returns a list of Mode objects, one for each type of route_type_desc in the GTFS (routes table)::
# > getAgencies() ::Returns cur.fetchall() of the agency table::
# > checkTableEmpty(tableName="intervals") :: Checks if <tableName> (str) has any rows; returns Boolean to that effect::
# > populateIntervals(DayObj=None, starti=0, endtime=datetime.time(21, 30)) ::Recursively populates the intervals table of self (Database) for <DayObj>. Be careful to ensure that the DB you're populating does not already have a populated intervals table::
# Day(Database) ::A date. PT runs by daily schedules, considering things like whether it is a weekday, etc::
# > __init__(database, datetimeObj) ::<database> is a Database object. <datetimeObj> is a datetime object::
# > getCanxServices() :CAUTION:Returns a list of PTService objects that are cancelled according to the calendar_dates table. For Wellington I suspect this table is a little dodgy::
# > getServicesDay() ::Returns a list of service IDs of services that are scheduled to run on self (Day). Accounts for exceptional additions and removals of services; but not the midnight bug, as a PTService is not a PTTrip::
# > plotModeSplitNVD3(databaseObj, city) ::Uses the Python-NVD3 library to plot a pie chart showing the breakdown of vehicle modes (num. services) in Day. Useful to compare over time, weekday vs. weekend, etc. <city> is str, used in the title of the chart::
# > animateDay(self, start, end, llcrnrlon, llcrnrlat, latheight, aspectratio, sourceproj=None, projected=False, targetproj=None, lat_0=None, lon_0=None, outoption="show", placetext='', skip=5, filepath='', filename='TestOut.mp4') ::See the method for parameter explanations::
# > getActiveTrips(second) ::Returns a list of PTTrip objects representing those trips that are running on self (Day) at <second>. Accounts for service cancellations and the "midnight bug"::
# > countActiveTrips(second) ::Returns an integer count of the number of trips of any mode that are operating at <second> on self (Day), according to self.getActiveTrips(<second>)::
# > countActiveTripsByMode(second) ::Returns a dictionary of {mode: integer} pairs similar to self.countActiveTrips(<second>) that breaks it down by mode::
# > bokehFrequencyByMode(n, Show=False, name="frequency.py", title="frequency.py", graphTitle="Wellington Public Transport Services, ") ::Returns an HTML graph of the number of active service every <n> seconds, on the second, broken down by mode::
# > getSittingStops(second) ::Returns a list of dictionaries which give information about any public transport stops which currently (<second>) have a vehicle sitting at them, on <DayObj>. Correctly handles post-midnight services::
# > getAllTrips() ::Returns a list of PTTrip objects representing those trips that run at least once on self (Day). Accounts for midnight bug correctly::
# > hexbinStops(self, projected=False, sourceproj=4326, targetproj=2134, save=True) :INCOMPLETE:Creates a hexbin plot representing the number of stops vehicles make in Day. Saves by default.::
# > nvd3FrequencyByMode(n, name="frequency_nvd3.html", verbose=True) :INCOMPLETE:Creates a Python-NVD3 chart of frequency at <n> temporal resolution from 0000 to 2359 on self::
# Mode(Database) ::A vehicle class, like "Bus", "Rail", "Ferry" and "Cable Car"::
# > __init__(database, modetype) ::<database> is a Database object. <modetype> is a string (as above) of the mode of interest::
# > getRoutesOfMode ::Returns a list of route objects that are of the same mode type as self::
# > getRoutesModeInDay(DayObj) ::Same as Mode.getRoutesModeInDay, but only returns those routes that run on DayObj::
# > countRoutesModeInDay(DayObj) ::A count of the returned values from Mode.getRoutesModeInDay(DayObj)::
# > countTripsModeInDay(DayObj) :INEFFICIENT:A count of the number of discrete trips made on the routes returned from Mode.getRoutesModeInDay(DayObj)::
# > getAgencies() ::Return a list of Agency objects that have routes of the same <modetype> as Mode::
# PTService(Database) ::A "service" in GTFS parlance, is used to identify when a service is available for one or more routes, when these are run, and what trips they represent::
# > __init__(database, service_id) ::<service_id> is an Integer. See database::
# > getRoutes_PTService() ::Returns a list of all of the Route objects based on the route_id or route_ids (plural) that the PTService object represents::
# Agency(Database) ::An Agency is an operator usually contracted to run one or more routes with vehicles that they own. They are subject to performance measurements and re-tendering, etc.::
# > __init__(database, agency_id) ::<database> is a Database object. <agency_id> is a String representing the abbreviation of the agency name::
# > getAgencyName() ::Returns a string of the full Agency name::
# > getRoutes_Agency() ::Returns a list of the Route objects representing the routes that the agency is contracted to operate on::
# > getServices() ::Returns a list of the PTService objects representing the services that the agency's routes represent::
# Route(Agency) ::A Route is a path that a trip takes. It has a shape, including vertices and end points. Each route is operated by a single Agency::
# > __init__(database, route_id) ::<database> is a Database object. <route_id> is a String (e.g. 'WBAO001I' for the incoming Number 1 bus)::
# > getAgencyID() ::Returns a String of the Agency (agency_id) that operates the route. Used to construct the Agency object that the Route object inherits from.::
# > getShortName() ::Returns a String of the route_short_name attribute from the routes table representing the name displayed to passengers on bus signage etc., e.g. "130"::
# > getLongName() ::Returns a String of the route_long_name attribute from the routes table representing the full name of the route::
# > getTripsInDayOnRoute(DayObj) ::Returns a list of PTTrip objects that run along the entire route. <DayObj> is a Day object::
# > countTripsInDayOnRoute(DayObj) ::Returns an Integer count of the trips that run on the route on <DayObj> (a la Route.getTripsInDayOnRoute())::
# > doesRouteRunOn(DayObj) ::Returns a Boolean according to whether the Route has a trip on <DayObj>::
# > inboundOrOutbound() ::Returns Strings "Incoming" or "Outgoing" according to whether the Route is such::
# > getMode() ::Returns the mode of the route, as a Mode object::
# PTTrip(Route) ::A PTTrip is a discrete trip made by single mode along a single route::
# > __init__(database, trip_id, DayObj=None) ::<database> is a Database object. <trip_id> is an Integer identifying the trip uniquely. See the database. <DayObj> is a Day object; if not None, then PTTrip.runstoday can be accessed (faster than PTTrip(DB, ID).doesTripRunOn(Day))::
# > getRouteID() ::Returns the route_id (String) of the route that the trip follows. Used to construct the Route object which the Trip object inherits::
# > doesTripRunOn(DayObj) ::Returns a Boolean reporting whether the PTTtrip runs on <DayObj> or not. Considers the exceptions in calendar_dates before deciding, and handles >24h time::
# > getRoute() ::Returns the Route object representing the route taken on Trip::
# > getService() ::Returns the PTService object that includes this trip::
# > getShapelyLine() ::Returns a Shapely LineString object representing the shape of the trip::
# > getShapelyLineProjected(source=4326, target=2134) ::Returns a projected Shapely LineString object, derived from self.getShapelyLine(), where <source> is the unprojected Shapely LineString GCS, and <target> is the target projection for the output. The defaults are WGS84 (<source>) and NZGD2000 (<target>).
# > prettyPrintShapelyLine() ::Prints a columnised WKT representation of <self> (trip's) shape, ready to be copy-pasted into QGIS via a TXT file::
# > plotShapelyLine() ::Uses matplotlib and Shapely to plot the shape of the trip. Does not plot stops (yet?)::
# > getStopsInSequence() ::Returns a list of the stops (as Stop objects) that the trip uses, in sequence::
# > whereIsVehicle(DayObj, write=False) ::<DayObj> is a Day object. Returns an ordered list of (second, shapely.geometry.Point) for the entire range of the trip in <DayObj>, every second it runs. If write=True, then write the result to the intervals table of the database::
# > getShapeID() ::Each trip has a particular shape, this returns the ID of it (str)::
# > getTripStartDay(DayObj) ::The start day of a PTTrip is either the given <DayObj>, or the day before it (or neither if it doesn't run). This method returns <DayObj> if the trip starts on <DayObj>, the Day BEFORE <DayObj> if that's right, and None in the third case. Raises an exception in the case of ambiguity::
# > getTripEndDay(DayObj) ::The end day of a PTTrip is either the given <DayObj>, or the day after it (or neither if it doesn't run). This method returns <DayObj> if the trip ends on <DayObj>, the Day AFTER <DayObj> if that's right, and None in the third case. Raises an exception in the case of ambiguity::
# > getTripStartTime(DayObj) ::The day and time that the trip starts, with reference to <DayObj>, if indeed it runs on <DayObj>. Returns a datetime.time object. If the start time is ambiguous (as in some situations when the trip continues over midnight on consecutive days), this method returns a list of datetime.time objects with the date and the time, using <DayObj> as seed. Return values are to microsecond resolution. Returns None if PTTrip does not run on <DayObj>::
# > getTripEndTime(DayObj) ::The day and time that the trip ends, with reference to <DayObj>, if indeed it runs on <DayObj>. Returns a datetime.time object. If the end time is ambiguous (as in some situations when the trip continues over midnight on consecutive days), this method returns a list of datetime.time objects with the date and the time, using <DayObj> as seed. Return values are to microsecond resolution. Returns None if PTTrip does not run on <DayObj>::
# Stop(Object) ::A place where PT vehicles stop within a route::
# > __init__(database, stop_id) ::<database> is a Database object. <stop_id> is an Integer identifying the trip uniquely, able to link it to stop_times. See the database::
# > getStopCode() ::Returns the stop_code, a short(er) integer version similar to stop_id, but published on signs and used in passenger text alerts::
# > getStopName() ::Returns the stop_name, a long name of the stop (String)::
# > getStopDesc() ::Returns the stop_desc, a short but textual name for the stop::
# > getLocationType() ::Returns location_type_desc from the stops table: ["Stop", "Station", "Hail and Ride"]. For Metlink: ["Stop", "Hail and Ride"]::
# > getShapelyPoint() ::Returns a shapely Point object representing the location of the stop::
# > getShapelyPointProjected(source=4326, target=2134) ::Returns a Shapely point representing the location of the stop, projected from the <source> GCS to the <target> PCS. 2134 = NZGD2000 / UTM zone 59S (default <target>); 4326 = WGS84 (default <source>). Returns a shapely.geometry.point.Point object::
# > getStopTime(TripObj, DayObj) ::Returns a list of tuples of date+time objects representing the day-time(s) when the <TripObj> arrives and departs self (Stop), using <DayObj> as seed::
# > getStopSnappedToRoute(TripObj) ::Returns a Shapely.geometry.point.Point object representing the originally-non-overlapping Stop as a Point overlapping (or very, very nearly overlapping) the Route shape of <TripObj>::
# Tasks for next iteration/s:
# > KEEP CODE DOCUMENTED THROUGHOUT
# > Develop the HTML and CSS for the website and embed the JavaScript graphs
# > Work on the visualisation of the network in real time (based on work done by NAME)
# > Work on the server/Django side of things to get an actual website!
# > Have the first version of the website up and running (one city)!
# > Expand to multiple cities
# > Consider how fare information can be added
#
#
# Author: NAME
# Inputs: Database written by AB_GTFStoSQL.py
#
#
# Created: 20131107
# Last Updated: 20140101
# Comments Updated: 20140101
#-------------------------------------------------------------------------------
################################################################################
############################### Notes ##########################################
################################################################################
|
"""
There are two types of tubing users. Irish users, and users who wish they were
Irish. j/k. Really, they are:
- casual users want to use whatever we have in tubing.
- advanced users who want to extend tubing to their own devices.
- contributor users want to contribute to tubing.
Our most important users are the casual users. They are also the easiest to
satisfy. For them, we have tubes. Tubes are easy to use and understand.
Advanced users are very important too, but harder to satisfy. We never know
what crazy plans they'll have in mind, so we must be ready. They need the tools
to build new tubes that extend our apparatus in unexpected ways.
For the benefit of the contributors, and ourselves, we're going to outline exactly
how things work now. This documentation is also an exercise in understanding
and simplifying the code base.
We'll call a tubing pipeline an apparatus. An apparatus has a Source, zero to
many Tubes, and a Sink.
A stream is what we call the units flowing through our Tubes. The units can be
bytes, characters, strings or objects.
Tubes
=====
Most users simply want to transform elements of data as they pass through the stream. This can be achieved with the following idiom::
SomeSource | [Tubes ..] | tubes.Map(lambda x: transform(x)) | [Tubes ..] | SomeSink
Sometimes you'd like to transform an entire chunk of data at a time, instead of one element at a time::
SomeSource | [Tubes ..] | tubes.ChunkMap(lambda x: transform(x)) | [Tubes ..] | SomeSink
Other times you just want to filter out some data::
SomeSource | [Tubes ..] | tubes.Filter(lambda x: x > 10) | [Tubes ..] | SomeSink
All of these general tube tools also take close_fn and abort_fn params and are
shorthand for creating your own Tube class.
Of course, if you need to keep state, you can create a closure, but at some
point that can become cumbersome. You might also want to make a reusable Tube;
in that case it could be nice to make a proper class.
The easiest way to extend tubing is to create a Transformer and use the
TransformerTubeFactory decorator to turn it into a Tube. A Transformer has the
following interface::
@tubes.TransformerTubeFactory()
class NewTube(object):
def transform(self, chunk):
return new_chunk
def close(self):
return last_chunk or None
def abort(self):
pass
A chunk is an iterable of whatever type of stream we are working on, whether it
be bytes, Unicode characters, strings or python objects. We can index it,
slice it, or iterate over it. `transform` simply takes a chunk and makes a new
chunk out of it. `TransformerTubeFactory` will take care of all the dirty
work. Transformers are enough for most tasks, but if you need to do something
more complex, you may need to go deeper.
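For instance, a minimal upper-casing transformer might look like this
(illustrative only, assuming a stream of strings)::

    @tubes.TransformerTubeFactory()
    class Upper(object):
        def transform(self, chunk):
            # chunk is an iterable of strings; build a new chunk.
            return [s.upper() for s in chunk]

        def close(self):
            return None  # nothing buffered, nothing to flush

        def abort(self):
            pass         # no resources to clean up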
.. image:: http://i.imgur.com/DyPouyL.png
   :alt: NAME

Now, let's describe how tubes work in more detail. Here's the Tube interface::
    # A tube factory can be a class:
    class TubeFactory(object):
        # This is what we export, and what is called when users create
        # a tube. The syntax looks like this:
        #   SourceFactory() | [TubeFactory()...] | SinkFactory()
        def __call__(self, *args, **kwargs):
            return Tube()

    # ... or a function:
    def TubeFactory(*args, **kwargs):
        return Tube()

    # ------------------------

    class Tube(object):
        def receive(self, source):
            # Return a TubeWorker bound to our source.
            tw = TubeWorker()
            tw.source = source
            return tw

    class TubeWorker(object):
        def tube(self, receiver):
            # receiver is the guy who will call our `read` method:
            # either another Tube or a Sink.
            return receiver.receive(self)

        def __or__(self, *args, **kwargs):
            # Our reason for existing.
            return self.tube(*args, **kwargs)

        def read(self, amt):
            # Our receiver will call this guy. We return a tuple of
            # `chunk, eof`. We should return a chunk of len `amt` of
            # whatever type of object we produce. If we've exhausted our
            # upstream source, then we should return True as the second
            # element of our tuple. The chunk size should be configurable,
            # and read should return a len() of chunk size or less.
            return [], True
A TubeFactory is what casual users deal with. As you can see, it can be an
object or a function, depending on your style. It's easier for me to reason
about state with an object, but if you prefer a closure, go for it! Classes are
just closures with more verbose states, after all.
When a casual is setting up some tubing, the TubeFactory returns a Tube, but
this isn't the last object we'll create. The Tube doesn't have a source
connected, so it's sort of useless. It's just a placeholder waiting for a
source. As soon as it gets a source, it will hand off all of its duties to a
TubeWorker.
A TubeWorker is ready to read from its source, but it doesn't. TubeWorkers are
pretty lazy and need someone else to tell them what to do. That's where a
receiver comes into play. A receiver can be another Tube, or a Sink. If it's
another Tube, you know the drill. It's just another lazy guy that will only
tell his source to read when his boss tells him to read. Ultimately, the only
guy who wants to do any work is the Sink. At the end of the chain, a sink's
receive function will be called, and he'll get everyone to work.
Technically, we could split the TubeWorker interface into two parts, but it's
not really necessary since they share the same state. We could also combine
TubeFactory, Tube and TubeWorker, and just build up state over time. I've
seriously considered this, but I don't know, this feels better. I admit, it is
a little complicated, but one advantage you get is that you can do something
like this::
from tubing import tubes
tube = tubes.GZip(chunk_size=2**32)
source | tube | output1
source | tube | output2
Since tube is a factory and not an object, each tubeline will have its own
state. Tubeline... I just made that up. That's an execution pipeline in
tubing. But we don't want to call it a pipeline, it's a tubeline. Maybe there's
a better name? I picture a chemistry lab with a bunch of transparent tubes
connected to beakers and things like that.
.. image:: http://imgur.com/jTtHITH.jpg
:alt: chemistry lab
Let's call it an apparatus.
TransformerTubeFactory
----------------------
So how does TransformerTubeFactory turn a Transformer into a TubeFactory?
TransformerTubeFactory is a utility that creates a function that wraps a
transformer in a tube. Sort of complicated, eh? I'm sorry about that, but
let's see if we can break it down.
TransformerTubeFactory returns a partial function out of the TransformerTube
instantiation. For the uninitiated, a partial is just a new version of a
function with some of the parameters already filled in. So we're currying the
transformer_cls and the default_chunk_size back to the casuals. They can fill
in the rest of the details and get back a TransformerTube.
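In other words, something roughly like this (a simplified sketch, not the
actual source; TransformerTube here stands in for the real wrapper class)::

    import functools

    class TransformerTube(object):
        def __init__(self, *args, **kwargs):
            self.transformer_cls = kwargs.pop('transformer_cls')
            self.default_chunk_size = kwargs.pop('default_chunk_size')
            # ... the worker machinery would be built here ...

    def TransformerTubeFactory(default_chunk_size=2**18):
        def decorator(transformer_cls):
            # Curry the transformer class and the default chunk size;
            # casual users fill in the remaining arguments later.
            return functools.partial(TransformerTube,
                                     transformer_cls=transformer_cls,
                                     default_chunk_size=default_chunk_size)
        return decorator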
The TransformerTubeWorker is where most of the hard work happens. There's a
bunch of code related to reading just enough chunks from our source to satisfy
our receiver. Remember, Workers are lazy, that's good because we won't waste a
bunch of space doing work we don't need to and then waiting for our work to
be consumed.
default_chunk_size is sort of important; by default it's something like 2**18.
It's the size of the chunks that we request from upstream, in the read function
(amt). That's great for byte streams (maybe?), but it's not that great for
large objects. You'll probably want to set it if you are using something other
than bytes. It can be overridden by plebes, this is just the default if they
don't specify it. Remember, we should be making the plebes' job easy, so try
and be a nice noble and set it to something sensible. In our own tests, using
2**3 for string or object streams and 2**18 for byte streams seemed to give
the best trade-off between speed and memory usage. YMMV.
We've explained Tubes, very well I might add. And it's a good thing. They are
the most complicated bit in tubing. All that's left is Sources and Sinks.
Sources
=======
TODO
Sinks
=====
TODO
Things You Can't do with Tubing
===============================
- Tee to another apparatus
- async programming
- your laundry
""" |
"""Trust Region Reflective algorithm for least-squares optimization.
The algorithm is based on ideas from the paper [STIR]_. The main idea is to
account for the presence of the bounds by appropriate scaling of the variables
(or, equivalently, changing a trust-region shape). Let's introduce a vector v:
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
where g is the gradient of a cost function and lb, ub are the bounds. Its
components are distances to the bounds at which the anti-gradient points (if
this distance is finite). Define a scaling matrix D = diag(v**0.5).
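As a sketch in NumPy (purely illustrative; the names mirror the text above,
not the implementation):

    import numpy as np

    def scaling_vector(x, g, lb, ub):
        # v[i] follows the piecewise definition above.
        v = np.ones_like(x)
        mask = (g < 0) & np.isfinite(ub)
        v[mask] = ub[mask] - x[mask]
        mask = (g > 0) & np.isfinite(lb)
        v[mask] = x[mask] - lb[mask]
        return v

    # D = diag(v**0.5), so applying D to a vector p is elementwise:
    # D p == v**0.5 * p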
First-order optimality conditions can be stated as
D^2 g(x) = 0.
This means that components of the gradient should be zero for strictly
interior variables, and that components must point inside the feasible region
for variables on a bound.
Now consider this system of equations as a new optimization problem. If the
point x is strictly interior (not on the bound) then the left-hand side is
differentiable and the Newton step for it satisfies
(D^2 H + diag(g) Jv) p = -D^2 g
where H is the Hessian matrix (or its J^T J approximation in least squares),
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables
we have a Newton step satisfying
B_h p_h = -g_h,
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
to "hat" variables. To guarantee global convergence we formulate a
trust-region problem based on the Newton step in the new variables:
0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
problem is
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
Here the meaning of the matrix D becomes more clear: it alters the shape
of a trust-region, such that large steps towards the bounds are not allowed.
In the implementation the trust-region problem is solved in "hat" space,
but handling of the bounds is done in the original space (see below and read
the code).
The introduction of the matrix D doesn't mean the bounds can be ignored: the
algorithm must keep its iterates strictly feasible (to satisfy the
aforementioned differentiability). The parameter theta controls the step back
from the boundary (see the code for details).
The algorithm does another important trick. If the trust-region solution
doesn't fit into the bounds, then a reflected (from the first encountered
bound) search direction is considered. For motivation and analysis refer to
the [STIR]_ paper (and other papers of the authors). In practice it doesn't
need much justification: the algorithm simply chooses the best step among
three: a constrained trust-region step, a reflected step and a constrained
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
space).
Another feature is that the trust-region radius control strategy is modified
to account for the appearance of the diagonal C matrix (called diag_h in the
code). Note that all of the described peculiarities vanish for problems
without bounds (the algorithm then becomes a standard trust-region method,
very similar to the ones implemented in MINPACK).
The implementation supports two methods of solving the trust-region problem.
The first, called 'exact', applies SVD to the Jacobian and then solves the
problem very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problems. The second, called 'lsmr', uses the 2-D subspace
approach (sometimes called "indefinite dogleg"), where the problem is solved
in a subspace spanned by the gradient and the approximate Gauss-Newton step
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
reformulated as a 4th-order algebraic equation and solved very accurately by
``numpy.roots``. The subspace approach allows solving very large problems
(up to a couple of million residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse.
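From the user's side this algorithm sits behind ``scipy.optimize.least_squares``
with ``method='trf'``; a minimal bounded toy problem might look like this
(the data and starting point are illustrative):

    import numpy as np
    from scipy.optimize import least_squares

    t = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([1.0, 0.6, 0.35, 0.2])

    def residuals(p):
        # fit y ~= p[0] * exp(p[1] * t)
        return p[0] * np.exp(p[1] * t) - y

    res = least_squares(residuals, x0=[1.0, -1.0],
                        bounds=([0.0, -np.inf], [np.inf, 0.0]),
                        method='trf')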
References
----------
.. [STIR] NAME NAME NAME and NAME "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [JJMore] NAME "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. NAME Lecture
""" |
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses the faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
ss2tf -- state-space to transfer function.
ss2zpk -- state-space to zero-pole-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass Daubechies filter coefficients
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
""" |
"""
[2016-09-07] Challenge #282 [Intermediate] The final Quixo move
https://www.reddit.com/r/dailyprogrammer/comments/51l1j1/20160907_challenge_282_intermediate_the_final/
#Description
[Quixo](https://boardgamegeek.com/boardgame/3190/quixo) is a grid based game. The game is played by 2 groups, one being
`x` and the other being `o`.
The goal of the game is to get 5 blocks in a row.
The blocks can only be taken from the sides and must be placed in a line, pushing all the other blocks.
from [boardgamegeek](https://boardgamegeek.com/boardgame/3190/quixo):
> On a turn, the active player takes a cube that is blank or bearing his symbol from the outer ring of the grid,
rotates it so that it shows his symbol (if needed), then adds it to the grid by pushing it into one of the rows from
which it was removed. Thus, a few pieces of the grid change places each turn, and the cubes slowly go from blank to
crosses and circles. Play continues until someone forms an orthogonal or diagonal line of five cubes bearing his
symbol, with this person winning the game.
If the block comes from a corner, you have 2 options
Start:
| A | B | C | D | E
---|---|----|----|----|----
1 | **x** | _ | _ | _ | o
2 | _ | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | x | _ | _ | _ | o
5 | _ | _ | _ | _ | _
Option 1:
| A | B | C | D | E
---|---|----|----|----|----
1 | _ | _ | _ | o | **x**
2 | _ | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | x | _ | _ | _ | o
5 | _ | _ | _ | _ | _
Option 2:
| A | B | C | D | E
---|---|----|----|----|----
1 | _ | _ | _ | _ | o
2 | _ | _ | _ | _ | _
3 | x | _ | _ | _ | _
4 | _ | _ | _ | _ | o
5 | **x** | _ | _ | _ | _
If the block is from the middle of the row, you have 3 options
Start:
| A | B | C | D | E
---|---|----|----|----|----
1 | x | _ | _ | _ | o
2 | _ | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | **x** | _ | _ | _ | o
5 | _ | _ | _ | _ | _
Option 1:
| A | B | C | D | E
---|---|----|----|----|----
1 | x | _ | _ | _ | o
2 | _ | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | _ | _ | _ | _ | o
5 | **x** | _ | _ | _ | _
Option 2:
| A | B | C | D | E
---|---|----|----|----|----
1 | **x** | _ | _ | _ | o
2 | x | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | _ | _ | _ | _ | o
5 | _ | _ | _ | _ | _
Option 3:
| A | B | C | D | E
---|---|----|----|----|----
1 | x | _ | _ | _ | o
2 | _ | _ | _ | _ | _
3 | _ | _ | _ | _ | _
4 | _ | _ | _ | o | **x**
5 | _ | _ | _ | _ | _
You can only move your own blocks or blank blocks directly. If you use a blank block, then that block becomes yours.
For those who can't work out the rules by reading this, you can watch [this 2 min instruction
video](https://www.youtube.com/watch?v=cZT5N6hIFYM).
If your move causes the other player's blocks to line up as well as yours, then it's called a `draw`.
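A quick sketch of the five-in-a-row check at the heart of this challenge (a hypothetical helper, not part of the challenge statement):

    def wins(board, sym):
        # board is a list of 5 row strings; check rows, columns, diagonals
        lines = [[(r, c) for c in range(5)] for r in range(5)]
        lines += [[(r, c) for r in range(5)] for c in range(5)]
        lines += [[(i, i) for i in range(5)], [(i, 4 - i) for i in range(5)]]
        return any(all(board[r][c] == sym for r, c in line) for line in lines)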
#Challenge
You will be given a 5 by 5 grid with a game that is almost finished; you only need to make the winning move.
You are always the player with `x`.
##Input
The grid with the current game
x_xxx
_xo_o
o_ooo
oxox_
oooo_
##Output
The move that wins you the game
B1 -> B5
Here you have me doing this with the actual game
- [step 1](http://imgur.com/NywAMsM)
- [step 2](http://imgur.com/tb4sFIU)
- [step 3](http://imgur.com/zldLzcN)
##Challenge input 1
x_xxx
_xo_o
o_ooo
oxooo
ooxx_
##Challenge output 1
B1 -> A1
##Inputs from /u/zandekar
no winning moves
xxxox
__ooo
oooxo
xxxoo
xxooo
more than one winning move
xxxox
xxxxo
___ox
oooxo
xxx_o
a draw
oooxx
xxx_x
oooxo
xoxox
xoxox
#Note
Sometimes there is more than 1 correct answer; giving just one is fine.
#Bonus
Give all possible answers to win.
##Input 1
x_xxx
_xo_o
o_ooo
oxox_
oooo_
##Output 1
B1 -> B5
B1 -> A1
B1 -> E1
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
#Edits
Some additional challenges and info from /u/zandekar
""" |
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
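A minimal reading sketch (the filename is illustrative; note that the aifc
module is deprecated in recent Python versions):

    import aifc

    f = aifc.open('sound.aiff', 'r')
    nchannels = f.getnchannels()
    nframes = f.getnframes()
    data = f.readframes(nframes)   # raw sample bytes
    f.close()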
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
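A minimal writing sketch (filename and parameters are illustrative):

    import aifc

    f = aifc.open('out.aifc', 'w')
    f.setnchannels(1)        # mono
    f.setsampwidth(2)        # 16-bit samples
    f.setframerate(44100)
    f.writeframes(b'\x00\x00' * 44100)   # one second of silence
    f.close()                # patches up the sizes in the header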
""" |
# Written by NAME NAME see LICENSE.txt for license information
# 24/06/11 boudewijn: this crawler uses cPickle for communication both from and to the client. This
# results in a security risk for both parties. Because it is not possible to solve this in a
# backward compatible way, I have disabled this crawler!
# import sys
# import cPickle
# from Tribler.Core.BitTornado.BT1.MessageID import CRAWLER_SEEDINGSTATS_QUERY
# from Tribler.Core.CacheDB.SqliteSeedingStatsCacheDB import *
# DEBUG = False
# class SeedingStatsCrawler:
# __single = None
# @classmethod
# def get_instance(cls, *args, **kargs):
# if not cls.__single:
# cls.__single = cls(*args, **kargs)
# return cls.__single
# def __init__(self):
# self._sqlite_cache_db = SQLiteSeedingStatsCacheDB.getInstance()
# def query_initiator(self, permid, selversion, request_callback):
# """
# Established a new connection. Send a CRAWLER_DATABASE_QUERY request.
# @param permid The Tribler peer permid
# @param selversion The overlay protocol version
# @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
# """
# if DEBUG:
# print >>sys.stderr, "crawler: SeedingStatsDB_update_settings_initiator"
# read_query = "SELECT * FROM SeedingStats WHERE crawled = 0"
# write_query = "UPDATE SeedingStats SET crawled = 1 WHERE crawled = 0"
# return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps([("read", read_query), ("write", write_query)], 2))
# def update_settings_initiator(self, permid, selversion, request_callback):
# """
# Established a new connection. Send a CRAWLER_DATABASE_QUERY request.
# @param permid The Tribler peer permid
# @param selversion The overlay protocol version
# @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
# """
# if DEBUG:
# print >>sys.stderr, "crawler: SeedingStatsDB_update_settings_initiator"
# try:
# sql_update = "UPDATE SeedingStatsSettings SET crawling_interval=%s WHERE crawling_enabled=%s"%(1800, 1)
# except:
# print_exc()
# else:
# return request_callback(CRAWLER_SEEDINGSTATS_QUERY, cPickle.dumps(sql_update, 2))
# def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
# """
# Received a CRAWLER_DATABASE_QUERY request.
# @param permid The Crawler permid
# @param selversion The overlay protocol version
# @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
# @param message The message payload
# @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
# MESSAGE contains a cPickled list. Each list element is a
# tuple. Each tuple consists of a string (either 'read' or
# 'write') and a string (the query)
# """
# if DEBUG:
# print >> sys.stderr, "crawler: handle_crawler_request", len(message)
# results = []
# try:
# items = cPickle.loads(message)
# if DEBUG:
# print >> sys.stderr, "crawler: handle_crawler_request", items
# for action, query in items:
# if action == "read":
# cursor = self._sqlite_cache_db.execute_read(query)
# elif action == "write":
# cursor = self._sqlite_cache_db.execute_write(query)
# else:
# raise Exception("invalid payload")
# if cursor:
# results.append(list(cursor))
# else:
# results.append(None)
# except Exception, e:
# if DEBUG:
# print >> sys.stderr, "crawler: handle_crawler_request", e
# results.append(str(e))
# reply_callback(cPickle.dumps(results, 2), 1)
# else:
# reply_callback(cPickle.dumps(results, 2))
# return True
# def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, reply_callback):
# """
# Received a CRAWLER_DATABASE_QUERY request.
# @param permid The Crawler permid
# @param selversion The overlay protocol version
# @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
# @param error The error value. 0 indicates success.
# @param message The message payload
# @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
# """
# if error:
# if DEBUG:
# print >> sys.stderr, "seedingstatscrawler: handle_crawler_reply"
# print >> sys.stderr, "seedingstatscrawler: error", error
# else:
# try:
# results = cPickle.loads(message)
# if DEBUG:
# print >> sys.stderr, "seedingstatscrawler: handle_crawler_reply"
# print >> sys.stderr, "seedingstatscrawler:", results
# # the first item in the list contains the results from the select query
# if results[0]:
# values = map(tuple, results[0])
# self._sqlite_cache_db.insertMany("SeedingStats", values)
# except Exception, e:
# # 04/11/08 boudewijn: cPickle.loads(...) sometimes
# # results in EOFError. This may be caused by message
# # being interpreted as non-binary.
# f = open("seedingstats-EOFError.data", "ab")
# f.write("--\n%s\n--\n" % message)
# f.close()
# print_exc()
# return False
# return True
# def handle_crawler_update_settings_request(self, permid, selversion, channel_id, message, reply_callback):
# """
# Received a CRAWLER_DATABASE_QUERY request.
# @param permid The Crawler permid
# @param selversion The overlay protocol version
# @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
# @param message The message payload
# @param reply_callback Call this function once to send the reply: reply_callback(payload [, error=123])
# """
# if DEBUG:
# print >> sys.stderr, "crawler: handle_crawler_SeedingStats_request", message
# # execute the sql
# sql_update = cPickle.loads(message)
# try:
# self._sqlite_cache_db.execute_write(sql_update)
# except Exception, e:
# reply_callback(str(e), 1)
# else:
# reply_callback(cPickle.dumps('Update succeeded.', 2))
# return True
# def handle_crawler_update_setings_reply(self, permid, selversion, channel_id, message, reply_callback):
# """
# Received a CRAWLER_DATABASE_QUERY request.
# @param permid The Crawler permid
# @param selversion The overlay protocol version
# @param channel_id Identifies a CRAWLER_REQUEST/CRAWLER_REPLY pair
# @param message The message payload
# @param request_callback Call this function one or more times to send the requests: request_callback(message_id, payload)
# """
# if DEBUG:
# print >> sys.stderr, "olapps: handle_crawler_SeedingStats_reply"
# return True
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# NAME <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
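#
# The checkSumAdjustment recipe above can be expressed directly (a minimal
# sketch, following the spec text):
#
#   import struct
#
#   def calc_checksum(data):
#       # Sum the data as big-endian ULONGs, padded to a 4-byte boundary.
#       data += b'\0' * (-len(data) % 4)
#       return sum(struct.unpack('>%dL' % (len(data) // 4), data)) & 0xFFFFFFFF
#
#   # With the checkSumAdjustment field zeroed in the head table:
#   # checkSumAdjustment = (0xB1B0AFBA - calc_checksum(font)) & 0xFFFFFFFF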
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec. (A sketch of
# packing the name records appears after the field listings below.)
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# BYTE VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
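#
# Since the EOT header is little-endian while the font data stays
# big-endian, the name records above can be emitted with a small helper
# (a minimal sketch):
#
#   import struct
#
#   def pack_eot_name(name):
#       # USHORT size, little-endian UTF-16 string data, USHORT zero padding.
#       data = name.encode('utf-16-le')
#       return struct.pack('<H', len(data)) + data + struct.pack('<H', 0)
#
#   # e.g. FamilyName plus Padding2 for name ID = 1:
#   # family_record = pack_eot_name(u'Example Family')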
|
#
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
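# Example (a minimal sketch; the server URL is an assumption, and the
# server must support XML-RPC introspection for this call to work):
#
#   import xmlrpclib
#   server = xmlrpclib.ServerProxy("http://localhost:8000")
#   print server.system.listMethods()
#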
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from NAME)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from NAME)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from NAME)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from NAME)
# 2002-03-17 fl Avoid buffered read when possible (from NAME)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from NAME)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from NAME)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by NAME, EMAIL, http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by NAME
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
|
"""The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for XY Version with brightness:
light:
platform: mqtt
name: "Office Light XY"
state_topic: "office/xy1/light/status"
command_topic: "office/xy1/light/switch"
brightness_state_topic: "office/xy1/brightness/status"
brightness_command_topic: "office/xy1/brightness/set"
xy_state_topic: "office/xy1/xy/status"
xy_command_topic: "office/xy1/xy/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
Configuration with brightness and color temp:
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
color_temp_state_topic: "office/rgb1/color_temp/status"
color_temp_command_topic: "office/rgb1/color_temp/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration with brightness and effect:
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
effect_state_topic: "office/rgb1/effect/status"
effect_command_topic: "office/rgb1/effect/set"
effect_list:
- rainbow
- colorloop
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for RGB Version with white value and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
white_value_state_topic: "office/rgb1/white_value/status"
white_value_command_topic: "office/rgb1/white_value/set"
white_value_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for RGB Version with RGB command template:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_command_template: "{{ '#%02x%02x%02x' | format(red, green, blue)}}"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for HS Version with brightness:
light:
platform: mqtt
name: "Office Light HS"
state_topic: "office/hs1/light/status"
command_topic: "office/hs1/light/switch"
brightness_state_topic: "office/hs1/brightness/status"
brightness_command_topic: "office/hs1/brightness/set"
hs_state_topic: "office/hs1/hs/status"
hs_command_topic: "office/hs1/hs/set"
qos: 0
payload_on: "on"
payload_off: "off"
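For manual testing against a broker, the command topics above can be
driven with any MQTT client. A minimal sketch using the third-party
paho-mqtt package (an assumption, not part of these tests; broker host
and payloads are illustrative):

    import paho.mqtt.publish as publish

    # Turn the RGB light on, then set its color via the command topics.
    publish.single('office/rgb1/light/switch', 'on', hostname='localhost')
    publish.single('office/rgb1/rgb/set', '255,128,0', hostname='localhost')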
""" |
"""
=================
Django S3 storage
=================
Usage
=====
Settings
--------
``DEFAULT_FILE_STORAGE``
~~~~~~~~~~~~~~~~~~~~~~~~
This setting stores the path to the S3 storage class; the first part
corresponds to the file path and the second to the name of the class. If
you've got ``example.com`` in your ``PYTHONPATH`` and store your storage
file in ``example.com/libs/storages/S3Storage.py``, the resulting setting
will be::

    DEFAULT_FILE_STORAGE = 'libs.storages.S3Storage.S3Storage'

If you keep the same filename as in the repository, it should always end
with ``S3Storage.S3Storage``.
``AWS_ACCESS_KEY_ID``
~~~~~~~~~~~~~~~~~~~~~
Your Amazon Web Services access key, as a string.
``AWS_SECRET_ACCESS_KEY``
~~~~~~~~~~~~~~~~~~~~~~~~~
Your Amazon Web Services secret access key, as a string.
``AWS_STORAGE_BUCKET_NAME``
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Your Amazon Web Services storage bucket name, as a string.
``AWS_CALLING_FORMAT``
~~~~~~~~~~~~~~~~~~~~~~
The way you'd like to call the Amazon Web Services API; for instance, if
you prefer subdomains::

    from S3 import CallingFormat
    AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
``AWS_HEADERS`` (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you'd like to set headers sent with each file of the storage::

    # see http://developer.yahoo.com/performance/rules.html#expires
    AWS_HEADERS = {
        'Expires': 'Thu, 15 Apr 2010 20:00:00 GMT',
        'Cache-Control': 'max-age=86400',
    }
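
Putting it all together, a minimal settings module could look like this
(the key and bucket values below are placeholders, not working
credentials)::

    from S3 import CallingFormat

    DEFAULT_FILE_STORAGE = 'libs.storages.S3Storage.S3Storage'
    AWS_ACCESS_KEY_ID = 'your-access-key-id'
    AWS_SECRET_ACCESS_KEY = 'your-secret-access-key'
    AWS_STORAGE_BUCKET_NAME = 'your-bucket'
    AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN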
Fields
------
Once you're done, ``default_storage`` will be the S3 storage::
>>> from django.core.files.storage import default_storage
>>> print default_storage.__class__
<class 'backends.S3Storage.S3Storage'>
This way, if you define a new ``FileField``, it will use the S3 storage::
>>> from django.db import models
>>> class Resume(models.Model):
... pdf = models.FileField(upload_to='pdfs')
... photos = models.ImageField(upload_to='photos')
...
>>> resume = Resume()
>>> print resume.pdf.storage
<backends.S3Storage.S3Storage object at ...>
Tests
=====
Initialization::
>>> from django.core.files.storage import default_storage
>>> from django.core.files.base import ContentFile
>>> from django.core.cache import cache
>>> from models import MyStorage
Storage
-------
Standard file access options are available, and work as expected::
>>> default_storage.exists('storage_test')
False
>>> file = default_storage.open('storage_test', 'w')
>>> file.write('storage contents')
>>> file.close()
>>> default_storage.exists('storage_test')
True
>>> file = default_storage.open('storage_test', 'r')
>>> file.read()
'storage contents'
>>> file.close()
>>> default_storage.delete('storage_test')
>>> default_storage.exists('storage_test')
False
Model
-----
An object without a file has limited functionality::
>>> obj1 = MyStorage()
>>> obj1.normal
<FieldFile: None>
>>> obj1.normal.size
Traceback (most recent call last):
...
ValueError: The 'normal' attribute has no file associated with it.
Saving a file enables full functionality::
>>> obj1.normal.save('django_test.txt', ContentFile('content'))
>>> obj1.normal
<FieldFile: tests/django_test.txt>
>>> obj1.normal.size
7
>>> obj1.normal.read()
'content'
Files can be read in a little at a time, if necessary::
>>> obj1.normal.open()
>>> obj1.normal.read(3)
'con'
>>> obj1.normal.read()
'tent'
>>> '-'.join(obj1.normal.chunks(chunk_size=2))
'co-nt-en-t'
Save another file with the same name::
>>> obj2 = MyStorage()
>>> obj2.normal.save('django_test.txt', ContentFile('more content'))
>>> obj2.normal
<FieldFile: tests/django_test_.txt>
>>> obj2.normal.size
12
Push the objects into the cache to make sure they pickle properly::
>>> cache.set('obj1', obj1)
>>> cache.set('obj2', obj2)
>>> cache.get('obj2').normal
<FieldFile: tests/django_test_.txt>
Deleting an object deletes the file it uses, if there are no other objects
still using that file::
>>> obj2.delete()
>>> obj2.normal.save('django_test.txt', ContentFile('more content'))
>>> obj2.normal
<FieldFile: tests/django_test_.txt>
Default values allow an object to access a single file::
>>> obj3 = MyStorage.objects.create()
>>> obj3.default
<FieldFile: tests/default.txt>
>>> obj3.default.read()
'default content'
But it shouldn't be deleted, even if there are no more objects using it::
>>> obj3.delete()
>>> obj3 = MyStorage()
>>> obj3.default.read()
'default content'
Verify the fix for #5655, making sure the directory is only determined once::
>>> obj4 = MyStorage()
>>> obj4.random.save('random_file', ContentFile('random content'))
>>> obj4.random
<FieldFile: .../random_file>
Clean up the temporary files::
>>> obj1.normal.delete()
>>> obj2.normal.delete()
>>> obj3.default.delete()
>>> obj4.random.delete()
""" |
"""
2017/2/1 1.3h
exp name : exp001
description: Complexity of XGB model
fname : exp001.py
env : i7 4790k, 32G, GTX1070, ubuntu 14.04.5LTS, Python 3.4.3
preprocess: None
result : Logloss, Feature importance, w_L1, w_L2, Leaf counts, Time
params:
model : CPU, GPU, hist_256, hist_1024
n_train : 10K, 100K, 1M, 2M, 4M
n_valid : n_train/4
n_features : 32
n_rounds : 50
n_clusters_per_class: 8
max_depth : 5, 10, 15
time
CPU GPU hist_256 hist_1024
n_train max_depth
10000 5 0.3 0.3 0.6 2.5
15 0.8 1.6 6.0 21.4
100000 5 3.3 1.0 1.3 3.5
15 10.1 4.7 26.9 102.0
1000000 5 39.4 16.3 6.6 9.7
15 134.8 44.7 114.4 417.9
2000000 5 107.2 36.0 12.4 16.1
15 359.5 101.8 124.8 427.9
4000000 5 257.8 78.4 23.8 30.5
15 950.3 227.6 218.6 684.6
leaf_cnts
CPU GPU hist_256 hist_1024
n_train max_depth
10000 5 1350 1352 1381 1380
15 20122 19418 20467 20357
100000 5 1569 1567 1560 1557
15 91411 88088 92975 94300
1000000 5 1598 1599 1594 1595
15 341319 338454 356076 351983
2000000 5 1600 1600 1600 1600
15 332436 335480 335903 335697
4000000 5 1600 1600 1600 1600
15 542859 528910 551295 544035
w_L1
CPU GPU hist_256 hist_1024
n_train max_depth
10000 5 93.0 93.1 93.2 94.7
15 1044.9 1045.8 1066.3 1056.1
100000 5 107.6 108.5 105.3 106.7
15 5384.5 5369.3 5486.6 5521.3
1000000 5 108.4 109.9 109.0 107.0
15 21782.9 22101.1 22675.5 22448.2
2000000 5 116.3 116.2 112.4 118.2
15 22207.7 22916.0 22371.6 22370.2
4000000 5 101.6 108.2 101.6 100.5
15 37074.2 36807.7 37554.5 37091.1
w_L2
CPU GPU hist_256 hist_1024
n_train max_depth
10000 5 20.7 20.6 20.6 20.8
15 60.1 60.0 60.5 60.1
100000 5 22.5 22.8 22.1 22.4
15 140.3 139.7 141.9 142.3
1000000 5 22.5 22.7 22.7 22.3
15 301.7 305.2 308.8 308.2
2000000 5 23.7 23.7 23.0 24.0
15 317.5 325.5 317.8 319.1
4000000 5 21.1 27.7 21.2 20.8
15 410.2 414.9 412.2 410.6
logloss
CPU GPU hist_256 hist_1024
n_train max_depth
10000 5 0.4021 0.4075 0.4043 0.4007
15 0.3074 0.3129 0.3173 0.3127
100000 5 0.4333 0.4365 0.4335 0.4361
15 0.2672 0.2691 0.2689 0.2655
1000000 5 0.4373 0.4370 0.4383 0.4372
15 0.2225 0.2217 0.2221 0.2210
2000000 5 0.4947 0.4928 0.4903 0.4949
15 0.2284 0.2285 0.2307 0.2283
4000000 5 0.4582 0.4554 0.4572 0.4568
15 0.2300 0.2305 0.2306 0.2311
Done: 4681.082534313202 seconds
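
A minimal sketch of how such a benchmark could be set up (parameter names
follow the current xgboost/scikit-learn APIs and are an assumption; the
original exp001.py may differ):

    import time
    import xgboost as xgb
    from sklearn.datasets import make_classification
    from sklearn.metrics import log_loss

    # Synthetic data matching the params above: 32 features, 8 clusters
    # per class, n_valid = n_train / 4.
    n_train = 10000
    X, y = make_classification(n_samples=n_train + n_train // 4,
                               n_features=32, n_informative=16,
                               n_clusters_per_class=8, random_state=0)
    dtrain = xgb.DMatrix(X[:n_train], label=y[:n_train])
    dvalid = xgb.DMatrix(X[n_train:], label=y[n_train:])

    params = {'objective': 'binary:logistic', 'max_depth': 5,
              'tree_method': 'hist', 'max_bin': 256,
              'eval_metric': 'logloss'}
    t0 = time.time()
    model = xgb.train(params, dtrain, num_boost_round=50)
    print('time: %.1fs  logloss: %.4f' %
          (time.time() - t0, log_loss(y[n_train:], model.predict(dvalid))))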
""" |