from __future__ import absolute_import, division, print_function
import bisect
from collections import Iterable, Iterator
from datetime import datetime
from distutils.version import LooseVersion
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
from toolz import merge, partial, first, partition, unique
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import unicode, apply, operator_div, bind_method
from ..utils import (repr_long_list, IndexCallable,
pseudorandom, derived_from, different_seeds)
from ..base import Base, compute, tokenize, normalize_token
no_default = '__no_default__'
return_scalar = '__return_scalar__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if len(args) == 1:
return args[0]
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
result = pd.concat(map(pd.Series, args))
result = type(args[0])(result.values)
result.name = args[0].name
return result
return args
def optimize(dsk, keys):
from .optimize import optimize
return optimize(dsk, keys)
def finalize(self, results):
return _concat(results)
class Scalar(Base):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, _name, name=None, divisions=None):
self.dask = dsk
self._name = _name
self.divisions = [None, None]
# name and divisions are ignored.
# They are dummies kept for compatibility with Series and DataFrame
def __array__(self):
# the array interface is required to support pandas instance + Scalar;
# otherwise, the above op results in a pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name)
def _keys(self):
return [(self._name, 0)]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
return Scalar(merge(dsk, self.dask), name)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, a, b, inv=False):
name = '{0}-{1}'.format(op.__name__, tokenize(a, b))
dsk = dict(a.dask)  # copy so the update below does not mutate a's graph
if not isinstance(b, Base):
pass
elif isinstance(b, Scalar):
dsk = merge(dsk, b.dask)
b = (b._name, 0)
else:
return NotImplemented
if inv:
dsk.update({(name, 0): (op, b, (a._name, 0))})
else:
dsk.update({(name, 0): (op, (a._name, 0), b)})
if isinstance(b, (pd.Series, pd.DataFrame)):
return _Frame(dsk, name, b, [b.index.min(), b.index.max()])
else:
return Scalar(dsk, name)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
_name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
metadata: scalar, None, list, pandas.Series or pandas.DataFrame
metadata to specify data structure.
- If scalar or None is given, the result is Series.
- If list is given, the result is DataFrame.
- If pandas data is given, the result is the class corresponding to
pandas data.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __new__(cls, dsk, _name, metadata, divisions):
if isinstance(metadata, (Series, pd.Series)):
metadata = metadata.name
elif isinstance(metadata, (DataFrame, pd.DataFrame)):
metadata = metadata.columns
if np.isscalar(metadata) or metadata is None:
return Series(dsk, _name, metadata, divisions)
else:
return DataFrame(dsk, _name, metadata, divisions)
# constructor properties
# http://pandas.pydata.org/pandas-docs/stable/internals.html#override-constructor-properties
@property
def _constructor_sliced(self):
"""Constructor used when a result has one lower dimension(s) as the original"""
raise NotImplementedError
@property
def _constructor(self):
"""Constructor used when a result has the same dimension(s) as the original"""
raise NotImplementedError
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def _args(self):
raise NotImplementedError
def __getnewargs__(self):
""" To load pickle """
return self._args
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name, None, self.divisions)
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def get_division(self, n):
""" Get nth division of the data """
if 0 <= n < self.npartitions:
name = 'get-division-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n+2]
return self._constructor(merge(self.dask, dsk), name,
self.column_info, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
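A rough sketch of typical use (illustrative only); subsequent operations
read from the cache rather than recomputing:
>>> cached = df.cache() # doctest: +SKIP
>>> cached.sum().compute() # doctest: +SKIP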
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return self._constructor(dsk2, name, self.column_info, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self):
chunk = lambda s: s.drop_duplicates()
return aca(self, chunk=chunk, aggregate=chunk, columns=self.column_info,
token='drop-duplicates')
def __len__(self):
return reduction(self.index, len, np.sum, token='len').compute()
def map_partitions(self, func, columns=no_default, *args, **kwargs):
""" Apply Python function on each DataFrame block
When using ``map_partitions`` you should provide either the column
names (if the result is a DataFrame) or the name of the Series (if the
result is a Series). The output type will be determined by the type of
``columns``.
Parameters
----------
func : function
Function applied to each block
columns : tuple or scalar
Column names or name of the output. Defaults to names of data itself.
When tuple is passed, DataFrame is returned. When scalar is passed,
Series is returned.
Examples
--------
When a string is passed as ``columns``, the result will be a Series.
>>> df.map_partitions(lambda df: df.x + 1, columns='x') # doctest: +SKIP
When a tuple is passed as ``columns``, the result will be a DataFrame.
>>> df.map_partitions(lambda df: df.head(), columns=df.columns) # doctest: +SKIP
"""
if columns == no_default:
columns = self.column_info
return map_partitions(func, columns, self, *args, **kwargs)
def random_split(self, p, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
p : list of floats
Fractions with which to split the dataframe row-wise; should sum to 1.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
seeds = different_seeds(self.npartitions, random_state)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self.column_info,
self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=5, compute=True):
""" First n rows of the dataset
Caveat: this only returns the first n rows of the first partition.
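For example (illustrative only; ``df`` is any dask DataFrame, and
``compute=False`` keeps the result lazy):
>>> df.head(3) # doctest: +SKIP
>>> df.head(3, compute=False) # doctest: +SKIP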
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (lambda x, n: x.head(n=n), (self._name, 0), n)}
result = self._constructor(merge(self.dask, dsk), name,
self.column_info, self.divisions[:2])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only returns the last n rows of the last partition.
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (lambda x, n: x.tail(n=n),
(self._name, self.npartitions - 1), n)}
result = self._constructor(merge(self.dask, dsk), name,
self.column_info, self.divisions[-2:])
if compute:
result = result.compute()
return result
def _loc(self, ind):
""" Helper function for the .loc accessor """
if isinstance(ind, Series):
return self._loc_series(ind)
elif isinstance(ind, slice):
return self._loc_slice(ind)
else:
return self._loc_element(ind)
def _loc_series(self, ind):
if not self.divisions == ind.divisions:
raise ValueError("Partitions of dataframe and index not the same")
return map_partitions(lambda df, ind: df.loc[ind],
self.columns, self, ind, token='loc-series')
def _loc_element(self, ind):
name = 'loc-element-%s-%s' % (str(ind), self._name)
part = _partition_of_index_value(self.divisions, ind)
if ind < self.divisions[0] or ind > self.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(ind))
dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}
if self.ndim == 1:
columns = self.column_info
else:
columns = ind
return self._constructor_sliced(merge(self.dask, dsk), name,
columns, [ind, ind])
def _loc_slice(self, ind):
name = 'loc-slice-%s-%s' % (str(ind), self._name)
assert ind.step in (None, 1)
if ind.start:
start = _partition_of_index_value(self.divisions, ind.start)
else:
start = 0
if ind.stop is not None:
stop = _partition_of_index_value(self.divisions, ind.stop)
else:
stop = self.npartitions - 1
istart = _coerce_loc_index(self.divisions, ind.start)
istop = _coerce_loc_index(self.divisions, ind.stop)
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
divisions = [istart, istop]
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
divisions = ((max(istart, self.divisions[start])
if ind.start is not None
else self.divisions[0],) +
self.divisions[start+1:stop+1] +
(min(istop, self.divisions[stop+1])
if ind.stop is not None
else self.divisions[-1],))
assert len(divisions) == len(dsk) + 1
return self._constructor(merge(self.dask, dsk), name,
self.column_info, divisions)
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
return IndexCallable(self._loc)
@property
def iloc(self):
""" Not implemented """
# not implemented because of performance concerns.
# see https://github.com/blaze/dask/pull/507
raise NotImplementedError("Dask Dataframe does not support iloc")
def repartition(self, divisions, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list
New division boundaries used to partition the data
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
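If the new divisions extend beyond the old ones, pass ``force=True``
(the division values below are illustrative only):
>>> df = df.repartition([-10, 10, 25], force=True) # doctest: +SKIP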
"""
return repartition(self, divisions, force=force)
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
@derived_from(pd.Series)
def fillna(self, value):
func = getattr(self._partition_type, 'fillna')
return map_partitions(func, self.column_info, self, value)
def sample(self, frac, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
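Examples
--------
A minimal sketch:
>>> df.sample(frac=0.25, random_state=123) # doctest: +SKIP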
"""
if random_state is None:
random_state = np.random.randint(np.iinfo(np.int32).max)
name = 'sample-' + tokenize(self, frac, random_state)
func = getattr(self._partition_type, 'sample')
seeds = different_seeds(self.npartitions, random_state)
dsk = dict(((name, i),
(apply, func, (tuple, [(self._name, i)]),
{'frac': frac, 'random_state': seed}))
for i, seed in zip(range(self.npartitions), seeds))
return self._constructor(merge(self.dask, dsk), name,
self.column_info, self.divisions)
@derived_from(pd.DataFrame)
def to_hdf(self, path_or_buf, key, mode='a', append=False, complevel=0,
complib=None, fletcher32=False, **kwargs):
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, complevel, complib,
fletcher32, **kwargs)
@derived_from(pd.DataFrame)
def to_csv(self, filename, **kwargs):
from .io import to_csv
return to_csv(self, filename, **kwargs)
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def _aca_agg(self, token, func, aggfunc=None):
""" Wrapper for aggregations """
raise NotImplementedError
@derived_from(pd.DataFrame)
def sum(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x: x.sum(axis=1)
name = '{0}sum(axis=1)'.format(self._token_prefix)
return map_partitions(f, None, self, token=name)
else:
return self._aca_agg(token='sum', func=lambda x: x.sum())
@derived_from(pd.DataFrame)
def max(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x: x.max(axis=1)
name = '{0}max(axis=1)'.format(self._token_prefix)
return map_partitions(f, None, self, token=name)
else:
return self._aca_agg(token='max', func=lambda x: x.max())
@derived_from(pd.DataFrame)
def min(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x: x.min(axis=1)
name = '{0}min(axis=1)'.format(self._token_prefix)
return map_partitions(f, None, self, token=name)
else:
return self._aca_agg(token='min', func=lambda x: x.min())
@derived_from(pd.DataFrame)
def count(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x: x.count(axis=1)
name = '{0}count(axis=1)'.format(self._token_prefix)
return map_partitions(f, None, self, token=name)
else:
return self._aca_agg(token='count', func=lambda x: x.count(),
aggfunc=lambda x: x.sum())
@derived_from(pd.DataFrame)
def mean(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x: x.mean(axis=1)
name = '{0}mean(axis=1)'.format(self._token_prefix)
return map_partitions(f, None, self, token=name)
else:
num = self._get_numeric_data()
s = num.sum()
n = num.count()
def f(s, n):
try:
return s / n
except ZeroDivisionError:
return np.nan
name = '{0}mean-{1}'.format(self._token_prefix, tokenize(s))
return map_partitions(f, no_default, s, n, token=name)
@derived_from(pd.DataFrame)
def var(self, axis=None, ddof=1):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x, ddof=ddof: x.var(axis=1, ddof=ddof)
name = '{0}var(axis=1, ddof={1})'.format(self._token_prefix, ddof)
return map_partitions(f, None, self, token=name)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum()
x2 = 1.0 * (num ** 2).sum()
n = num.count()
def f(x2, x, n):
try:
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.nan
name = '{0}var(ddof={1})'.format(self._token_prefix, ddof)
return map_partitions(f, no_default, x2, x, n, token=name)
@derived_from(pd.DataFrame)
def std(self, axis=None, ddof=1):
axis = self._validate_axis(axis)
if axis == 1:
f = lambda x, ddof=ddof: x.std(axis=1, ddof=ddof)
name = '{0}std(axis=1, ddof={1})'.format(self._token_prefix, ddof)
return map_partitions(f, None, self, token=name)
else:
v = self.var(ddof=ddof)
name = '{0}std(ddof={1})'.format(self._token_prefix, ddof)
return map_partitions(np.sqrt, no_default, v, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
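Examples
--------
Illustrative only, assuming ``df`` has numeric columns:
>>> df.quantile(0.25) # doctest: +SKIP
>>> df.quantile([0.25, 0.5, 0.75]) # doctest: +SKIP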
"""
axis = self._validate_axis(axis)
name = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(pd.DataFrame.quantile, None, self,
q, axis, token=name)
else:
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[q.dask for q in quantiles])
qnames = [(q._name, 0) for q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(name, 0)] = (pd.Series, (list, qnames), num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, name, num.columns, divisions)
else:
from .multi import _pdconcat
dask[(name, 0)] = (_pdconcat, (list, qnames), 1)
return DataFrame(dask, name, num.columns,
quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self):
name = 'describe--' + tokenize(self)
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(), num.mean(), num.std(), num.min(),
num.quantile([0.25, 0.5, 0.75]), num.max()]
stats_names = [(s._name, 0) for s in stats]
def build_partition(values):
assert len(values) == 6
count, mean, std, min, q, max = values
part1 = self._partition_type([count, mean, std, min],
index=['count', 'mean', 'std', 'min'])
q.index = ['25%', '50%', '75%']
part3 = self._partition_type([max], index=['max'])
return pd.concat([part1, q, part3])
dsk = dict()
dsk[(name, 0)] = (build_partition, (list, stats_names))
dsk = merge(dsk, num.dask, *[s.dask for s in stats])
return self._constructor(dsk, name, num.column_info,
divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, agginit, axis):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return map_partitions(chunk, self.column_info, self, 1, token=name)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self.column_info, self, token=name1)
# take the last element of each cumulated partition
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(lambda x: x.iloc[-1],
self.column_info, cumpart, token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
# aggregate each cumulated partition with the previous partition's last element
dask = {}
if isinstance(self, DataFrame):
agginit = pd.Series(agginit, index=self.column_info)
dask[(cname, 0)] = agginit
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step in the graph to avoid recomputation
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return self._constructor(merge(dask, cumpart.dask, cumlast.dask),
name, self.column_info, self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None):
return self._cum_agg('cumsum', self._partition_type.cumsum,
operator.add, 0, axis=axis)
@derived_from(pd.DataFrame)
def cumprod(self, axis=None):
return self._cum_agg('cumprod', self._partition_type.cumprod,
operator.mul, 1, axis=axis)
@derived_from(pd.DataFrame)
def cummax(self, axis=None):
def aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where(x > y, y, axis=x.ndim - 1)
else: # scalar
return x if x > y else y
return self._cum_agg('cummax', self._partition_type.cummax,
aggregate, np.nan, axis=axis)
@derived_from(pd.DataFrame)
def cummin(self, axis=None):
def aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where(x < y, y, axis=x.ndim - 1)
else: # scalar
return x if x < y else y
return self._cum_agg('cummin', self._partition_type.cummin,
aggregate, np.nan, axis=axis)
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
return map_partitions(self._partition_type.where,
self.column_info, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(self._partition_type.mask,
self.column_info, self, cond, other)
@derived_from(pd.Series)
def append(self, other):
# DataFrame.append overrides this method, so wrap it
# with the pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
name: scalar or None
Series name. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
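Examples
--------
Normally created through I/O helpers rather than constructed directly;
a rough sketch:
>>> import pandas as pd # doctest: +SKIP
>>> import dask.dataframe as dd # doctest: +SKIP
>>> s = dd.from_pandas(pd.Series([1, 2, 3, 4]), npartitions=2) # doctest: +SKIP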
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __new__(cls, dsk, _name, name, divisions):
result = object.__new__(cls)
result.dask = dsk
result._name = _name
result.name = name
result.divisions = tuple(divisions)
return result
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def _constructor_sliced(self):
return Scalar
@property
def _constructor(self):
return Series
@property
def _empty_partition(self):
""" Return empty dummy to emulate the result """
return self._partition_type(name=self.name)
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self.head().dtype
@property
def column_info(self):
""" Return Series.name """
return self.name
@property
def columns(self):
""" Return 1 element tuple containing the name """
return (self.name,)
@property
def nbytes(self):
return reduction(self, lambda s: s.nbytes, np.sum, token='nbytes')
def __repr__(self):
return ("dd.%s<%s, divisions=%s>" %
(self.__class__.__name__, self._name,
repr_long_list(self.divisions)))
def __array__(self, dtype=None, **kwargs):
x = np.array(self.compute())
if dtype and x.dtype != dtype:
x = x.astype(dtype)
return x
def __array_wrap__(self, array, context=None):
return pd.Series(array, name=self.name)
@cache_readonly
def dt(self):
return DatetimeAccessor(self)
@cache_readonly
def str(self):
return StringAccessor(self)
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
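Examples
--------
Illustrative only, for a dask Series ``s``:
>>> s.quantile(0.5) # doctest: +SKIP
>>> s.quantile([0.25, 0.5, 0.75]) # doctest: +SKIP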
"""
return quantile(self, q)
def resample(self, rule, how='mean', axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0):
"""Group by a DatetimeIndex values in time periods of size `rule`.
Parameters
----------
rule : str or pandas.datetools.Tick
The frequency to resample by. For example, 'H' is one hour
intervals.
how : str or callable
Method to use to summarize your data. For example, 'mean' takes the
average value of the Series in the time interval `rule`.
Notes
-----
For additional argument descriptions please consult the pandas
documentation.
Returns
-------
dask.dataframe.Series
See Also
--------
pandas.Series.resample
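Examples
--------
A hedged sketch, assuming ``s`` is a dask Series with a DatetimeIndex;
``how='ohlc'`` yields a DataFrame with open/high/low/close columns:
>>> s.resample('H', how='mean') # doctest: +SKIP
>>> s.resample('D', how='ohlc') # doctest: +SKIP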
"""
# Validate Inputs
rule = pd.datetools.to_offset(rule)
day_nanos = pd.datetools.Day().nanos
if getattr(rule, 'nanos', None) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
kwargs = {'fill_method': fill_method, 'limit': limit,
'loffset': loffset, 'base': base,
'convention': convention != 'start', 'kind': kind}
err = ', '.join('`{0}`'.format(k) for (k, v) in kwargs.items() if v)
if err:
raise NotImplementedError('Keywords: ' + err)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.divisions, rule,
closed, label)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.repartition(newdivs, force=True)
kwargs = {'how': how, 'closed': closed, 'label': label}
name = tokenize(self, rule, kwargs)
dsk = partitioned.dask
def func(series, start, end, closed):
out = series.resample(rule, **kwargs)
return out.reindex(pd.date_range(start, end, freq=rule, closed=closed))
keys = partitioned._keys()
args = zip(keys, outdivs, outdivs[1:], ['left']*(len(keys)-1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (func, k, s, e, c)
if how == 'ohlc':
return DataFrame(dsk, name, ['open', 'high', 'low', 'close'], outdivs)
return Series(dsk, name, self.name, outdivs)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'series-index-%s[%s]' % (self._name, key._name)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
def _aca_agg(self, token, func, aggfunc=None):
""" Wrapper for aggregations """
if aggfunc is None:
aggfunc = func
return aca([self], chunk=func,
aggregate=lambda x: aggfunc(pd.Series(x)),
columns=return_scalar, token=self._token_prefix + token)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
return SeriesGroupBy(self, index, **kwargs)
@derived_from(pd.Series)
def sum(self, axis=None):
return super(Series, self).sum(axis=axis)
@derived_from(pd.Series)
def max(self, axis=None):
return super(Series, self).max(axis=axis)
@derived_from(pd.Series)
def min(self, axis=None):
return super(Series, self).min(axis=axis)
@derived_from(pd.Series)
def count(self):
return super(Series, self).count()
@derived_from(pd.Series)
def mean(self, axis=None):
return super(Series, self).mean(axis=axis)
@derived_from(pd.Series)
def var(self, axis=None, ddof=1):
return super(Series, self).var(axis=axis, ddof=ddof)
@derived_from(pd.Series)
def std(self, axis=None, ddof=1):
return super(Series, self).std(axis=axis, ddof=ddof)
@derived_from(pd.Series)
def cumsum(self, axis=None):
return super(Series, self).cumsum(axis=axis)
@derived_from(pd.Series)
def cumprod(self, axis=None):
return super(Series, self).cumprod(axis=axis)
@derived_from(pd.Series)
def cummax(self, axis=None):
return super(Series, self).cummax(axis=axis)
@derived_from(pd.Series)
def cummin(self, axis=None):
return super(Series, self).cummin(axis=axis)
@derived_from(pd.Series)
def nunique(self):
return self.drop_duplicates().count()
@derived_from(pd.Series)
def value_counts(self):
chunk = lambda s: s.value_counts()
if LooseVersion(pd.__version__) > '0.16.2':
agg = lambda s: s.groupby(level=0).sum().sort_values(ascending=False)
else:
agg = lambda s: s.groupby(level=0).sum().sort(inplace=False, ascending=False)
return aca(self, chunk=chunk, aggregate=agg, columns=self.name,
token='value-counts')
@derived_from(pd.Series)
def nlargest(self, n=5):
return nlargest(self, n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(pd.Series.isin, self, other)
@derived_from(pd.Series)
def map(self, arg, na_action=None):
return elemwise(pd.Series.map, self, arg, na_action, name=self.name)
@derived_from(pd.Series)
def astype(self, dtype):
return map_partitions(pd.Series.astype, self.name, self, dtype)
@derived_from(pd.Series)
def dropna(self):
return map_partitions(pd.Series.dropna, self.name, self)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return map_partitions(pd.Series.between, self.name, self, left, right,
inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None):
return map_partitions(pd.Series.clip, self.name, self, lower, upper)
@derived_from(pd.Series)
def notnull(self):
return map_partitions(pd.Series.notnull, self.name, self)
def to_bag(self, index=False):
"""Convert to a dask Bag.
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
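Examples
--------
A minimal sketch for a dask Series ``s``; with ``index=True`` the bag
holds ``(index, value)`` tuples:
>>> s.to_bag() # doctest: +SKIP
>>> s.to_bag(index=True) # doctest: +SKIP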
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
_name = name if name is not None else self.name
return map_partitions(pd.Series.to_frame, [_name], self, name)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
return map_partitions(op, self.column_info, self, other,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
def apply(self, func, convert_dtype=True, name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply """
if name is no_default:
name = self.name
return map_partitions(pd.Series.apply, name, self, func,
convert_dtype, args, **kwds)
class Index(Series):
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
@property
def _constructor(self):
return Index
def nunique(self):
return self.drop_duplicates().count()
def count(self):
f = lambda x: pd.notnull(x).sum()
return reduction(self, f, np.sum, token='index-count')
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of str
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __new__(cls, dask, name, columns, divisions):
result = object.__new__(cls)
result.dask = dask
result._name = name
result.columns = tuple(columns)
result.divisions = tuple(divisions)
return result
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
@property
def _constructor_sliced(self):
return Series
@property
def _constructor(self):
return DataFrame
@property
def _empty_partition(self):
""" Return empty dummy to emulate the result """
return self._partition_type(columns=self.columns)
def __getitem__(self, key):
if np.isscalar(key):
name = '{0}.{1}'.format(self._name, key)
if key in self.columns:
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return self._constructor_sliced(merge(self.dask, dsk), name,
key, self.divisions)
else:
raise KeyError(key)
if isinstance(key, list):
name = '%s[%s]' % (self._name, str(key))
if all(k in self.columns for k in key):
dsk = dict(((name, i), (operator.getitem,
(self._name, i),
(list, key)))
for i in range(self.npartitions))
return self._constructor(merge(self.dask, dsk), name,
key, self.divisions)
else:
raise KeyError([k for k in key if k not in self.columns])
if isinstance(key, Series):
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
name = 'series-slice-%s[%s]' % (self._name, key._name)
dsk = dict(((name, i), (self._partition_type._getitem_array,
(self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return self._constructor(merge(self.dask, key.dask, dsk), name,
self.columns, self.divisions)
raise NotImplementedError(key)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as e:
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(self.columns)))
def __repr__(self):
return ("dd.DataFrame<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
@property
def ndim(self):
""" Return dimensionality """
return 2
@cache_readonly
def _dtypes(self):
""" for cache, cache_readonly hides docstring """
return self._get(self.dask, self._keys()[0]).dtypes
@property
def dtypes(self):
""" Return data types """
return self._dtypes
@derived_from(pd.DataFrame)
def set_index(self, other, **kwargs):
from .shuffle import set_index
return set_index(self, other, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
from .shuffle import set_partition
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
""" Return DataFrame.columns """
return self.columns
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None):
return nlargest(self, n, columns)
@derived_from(pd.DataFrame)
def reset_index(self):
new_columns = ['index'] + list(self.columns)
reset_index = self._partition_type.reset_index
out = self.map_partitions(reset_index, columns=new_columns)
out.divisions = [None] * (self.npartitions + 1)
return out
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
return GroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
return categorize(self, columns, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._empty_partition.assign(**dict((k, []) for k in kwargs))
return elemwise(_assign, self, *pairs, columns=list(df2.columns))
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
column_info = (self._empty_partition.rename(columns=columns).columns)
func = pd.DataFrame.rename
# the extra positional args to rename are (index, columns); they are passed
# positionally because map_partitions already uses the ``columns`` keyword
return map_partitions(func, column_info, self, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + pd.DataFrame.query.__doc__
name = '%s.query(%s)' % (self._name, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, pd.DataFrame.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (pd.DataFrame.query, (self._name, i), expr))
for i in range(self.npartitions))
return self._constructor(merge(dsk, self.dask), name,
self.columns, self.divisions)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
def f(df, how=how, subset=subset):
return df.dropna(how=how, subset=subset)
return map_partitions(f, self.columns, self)
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
@cache_readonly
def _numeric_columns(self):
# Cache to avoid repeated calls
dummy = self._get(self.dask, self._keys()[0])._get_numeric_data()
return dummy.columns.tolist()
def _get_numeric_data(self, how='any', subset=None):
numeric_columns = [c for c, dtype in zip(self.columns, self.dtypes)
if issubclass(dtype.type, np.number)]
if len(numeric_columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return map_partitions(pd.DataFrame._get_numeric_data,
numeric_columns, self, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _aca_agg(self, token, func, aggfunc=None):
""" Wrapper for aggregations """
if aggfunc is None:
aggfunc = func
return aca([self], chunk=func,
aggregate=lambda x: aggfunc(x.groupby(level=0)),
columns=None, token=self._token_prefix + token)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0):
if axis != 1:
raise NotImplementedError("Drop currently only works for axis=1")
columns = list(self._empty_partition.drop(labels, axis=axis).columns)
return elemwise(pd.DataFrame.drop, self, labels, axis, columns=columns)
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
suffixes=('_x', '_y'), npartitions=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, npartitions=npartitions)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
msg = ('Unable to append a dd.Series to a dd.DataFrame. '
'Use a pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
right = None
if axis == 1:
# when axis=1, a series is added to each row;
# this is not supported for dd.Series.
# dd.DataFrame is not affected since op is applied elementwise
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
right = other.index
if isinstance(other, (DataFrame, pd.DataFrame)):
right = other.columns
if right is not None:
left = self._empty_partition
right = pd.DataFrame(columns=right)
columns = op(left, right, axis=axis).columns.tolist()
else:
columns = self.columns
return map_partitions(op, columns, self, other,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
def apply(self, func, axis=0, args=(), columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only axis=1 is supported (and it must be specified explicitly)
2. The user must provide output columns or column
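Examples
--------
Illustrative only; ``x`` and ``y`` are hypothetical columns of ``df`` and
``columns='total'`` names the resulting Series:
>>> df.apply(lambda row: row.x + row.y, axis=1, columns='total') # doctest: +SKIP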
"""
if axis == 0:
raise NotImplementedError(
"dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
if columns is no_default:
raise ValueError(
"Please supply column names of output dataframe or series\n"
" Before: df.apply(func)\n"
" After: df.apply(func, columns=['x', 'y']) for dataframe result\n"
" or: df.apply(func, columns='x') for series result")
return map_partitions(pd.DataFrame.apply, columns, self, func, axis,
False, False, None, args, **kwds)
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
def elemwise_property(attr, s):
return map_partitions(getattr, s.name, s, attr)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def nlargest(df, n=5, columns=None):
if isinstance(df, Index):
raise AttributeError("nlargest is not available for Index objects")
elif isinstance(df, Series):
token = 'series-nlargest-n={0}'.format(n)
f = lambda s: s.nlargest(n)
elif isinstance(df, DataFrame):
token = 'dataframe-nlargest-n={0}'.format(n)
f = lambda df: df.nlargest(n, columns)
columns = df.columns # this is a hack.
return aca(df, f, f, columns=columns, token=token)
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[start:stop]
if not include_right_boundary:
right_index = result.index.get_slice_bound(stop, 'left',
result.index.inferred_type)
result = result.iloc[:right_index]
return result
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.get('columns', no_default)
name = kwargs.get('name', None)
_name = 'elemwise-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
dsk = dict(((_name, i), (op2,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if columns == no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
if len(dfs) == 1:
columns = dfs[0]
else:
columns = op2(*[df._empty_partition for df in dfs])
return _Frame(dsk, _name, columns, divisions)
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]]
def empty_safe(func, arg):
"""
>>> empty_safe(sum, [1, 2, 3])
6
>>> empty_safe(sum, [])
('empty', 0)
"""
if len(arg) == 0:
return ('empty', func(arg))
else:
return func(arg)
def reduction(x, chunk, aggregate, token=None):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
token_key = tokenize(x, token or (chunk, aggregate))
token = token or 'reduction'
a = '{0}--chunk-{1}'.format(token, token_key)
dsk = dict(((a, i), (empty_safe, chunk, (x._name, i)))
for i in range(x.npartitions))
b = '{0}--aggregation-{1}'.format(token, token_key)
dsk2 = {(b, 0): (aggregate, (remove_empties,
[(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
def _groupby_apply(df, ind, func):
return df.groupby(ind).apply(func)
def _groupby_apply_level0(df, func):
return df.groupby(level=0).apply(func)
def _groupby_getitem_apply(df, ind, key, func):
return df.groupby(ind)[key].apply(func)
def _groupby_level0_getitem_apply(df, key, func):
return df.groupby(level=0)[key].apply(func)
def _groupby_get_group(df, by_key, get_key, columns):
grouped = df.groupby(by_key)
if isinstance(columns, tuple):
columns = list(columns)
if get_key in grouped.groups:
return grouped[columns].get_group(get_key)
else:
# create an empty DataFrame/Series with the same
# dtypes as the original
return df[0:0][columns]
class _GroupBy(object):
def _aca_agg(self, token, func, aggfunc=None):
if aggfunc is None:
aggfunc = func
if isinstance(self.index, Series):
def chunk(df, index, func=func, key=self.key):
if isinstance(df, pd.Series):
return func(df.groupby(index))
else:
return func(df.groupby(index)[key])
agg = lambda df: aggfunc(df.groupby(level=0))
token = self._token_prefix + token
return aca([self.df, self.index], chunk=chunk, aggregate=agg,
columns=self.key, token=token)
else:
def chunk(df, index=self.index, func=func, key=self.key):
return func(df.groupby(index)[key])
if isinstance(self.index, list):
levels = list(range(len(self.index)))
else:
levels = 0
agg = lambda df: aggfunc(df.groupby(level=levels))
token = self._token_prefix + token
return aca(self.df, chunk=chunk, aggregate=agg,
columns=self.key, token=token)
@derived_from(pd.core.groupby.GroupBy)
def sum(self):
return self._aca_agg(token='sum', func=lambda x: x.sum())
@derived_from(pd.core.groupby.GroupBy)
def min(self):
return self._aca_agg(token='min', func=lambda x: x.min())
@derived_from(pd.core.groupby.GroupBy)
def max(self):
return self._aca_agg(token='max', func=lambda x: x.max())
@derived_from(pd.core.groupby.GroupBy)
def count(self):
return self._aca_agg(token='count', func=lambda x: x.count(),
aggfunc=lambda x: x.sum())
@derived_from(pd.core.groupby.GroupBy)
def mean(self):
return 1.0 * self.sum() / self.count()
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
return map_partitions(_groupby_get_group, self.column_info,
self.df,
self.index, key, self.column_info, token=token)
class GroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __init__(self, df, index=None, key=None, **kwargs):
self.df = df
self.index = index
self.kwargs = kwargs
if not kwargs.get('as_index', True):
msg = ("The keyword argument `as_index=False` is not supported in "
"dask.dataframe.groupby")
raise NotImplementedError(msg)
if isinstance(index, list):
for i in index:
if i not in df.columns:
raise KeyError("Columns not found: '{0}'".format(i))
_key = [c for c in df.columns if c not in index]
elif isinstance(index, Series):
assert index.divisions == df.divisions
# check whether given Series is taken from given df and unchanged.
# If any operations are performed, _name will be changed to
# e.g. "elemwise-xxxx"
if (index.name is not None and
index._name == self.df._name + '.' + index.name):
_key = [c for c in df.columns if c != index.name]
else:
_key = list(df.columns)
else:
if index not in df.columns:
raise KeyError("Columns not found: '{0}'".format(index))
_key = [c for c in df.columns if c != index]
self.key = key or _key
@property
def column_info(self):
return self.df.columns
def apply(self, func, columns=None):
""" Apply function to each group.
If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
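A hedged sketch; the column names ``a`` and ``b`` are hypothetical:
>>> df.groupby('a').apply(lambda group: group.b.sum(), columns='b') # doctest: +SKIP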
"""
if (isinstance(self.index, Series) and
self.index._name == self.df.index._name):
df = self.df
return map_partitions(_groupby_apply_level0,
columns or self.df.columns,
self.df, func)
else:
from .shuffle import shuffle
# df = set_index(self.df, self.index, **self.kwargs)
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(_groupby_apply,
columns or self.df.columns,
self.df, self.index, func)
def __getitem__(self, key):
if isinstance(key, list):
for k in key:
if k not in self.df.columns:
raise KeyError("Columns not found: '{0}'".format(k))
return GroupBy(self.df, index=self.index, key=key, **self.kwargs)
else:
if key not in self.df.columns:
raise KeyError("Columns not found: '{0}'".format(key))
return SeriesGroupBy(self.df, self.index, key)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(self.df.columns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except KeyError:
raise AttributeError()
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, index, key=None, **kwargs):
self.df = df
self.index = index
self.key = key
self.kwargs = kwargs
if isinstance(df, Series):
if not isinstance(index, Series):
raise TypeError("A dask Series must be used as the index for a"
" Series groupby.")
if not df.divisions == index.divisions:
raise NotImplementedError("The Series and index of the groupby"
" must have the same divisions.")
@property
def column_info(self):
return self.key
def apply(self, func, columns=None):
""" Apply function to each group.
If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
"""
# df = set_index(self.df, self.index, **self.kwargs)
if self.index._name == self.df.index._name:
df = self.df
return map_partitions(_groupby_level0_getitem_apply,
self.df, self.key, func,
columns=columns)
else:
from .shuffle import shuffle
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(_groupby_apply,
columns or self.df.columns,
self.df, self.index, func)
def nunique(self):
def chunk(df, index):
# we call set_index here to force a possibly duplicate index
# for our reduce step
if isinstance(df, pd.DataFrame):
grouped = (df.groupby(index)
.apply(pd.DataFrame.drop_duplicates, subset=self.key))
grouped.index = grouped.index.get_level_values(level=0)
else:
if isinstance(index, np.ndarray):
assert len(index) == len(df)
index = pd.Series(index, index=df.index)
grouped = pd.concat([df, index], axis=1).drop_duplicates()
return grouped
def agg(df):
if isinstance(self.df, Series):
return df.groupby(df.columns[1])[df.columns[0]].nunique()
else:
return df.groupby(level=0)[self.key].nunique()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=self.key,
token='series-groupby-nunique')
def apply_concat_apply(args, chunk=None, aggregate=None,
columns=no_default, token=None):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args
if isinstance(arg, _Frame))
token_key = tokenize(token or (chunk, aggregate), columns, *args)
token = token or 'apply-concat-apply'
a = '{0}--first-{1}'.format(token, token_key)
dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args])))
for i in range(args[0].npartitions))
b = '{0}--second-{1}'.format(token, token_key)
dsk2 = {(b, 0): (aggregate,
(_concat,
(list, [(a, i) for i in range(args[0].npartitions)])))}
if columns == no_default:
return_type = type(args[0])
columns = None
else:
return_type = _get_return_type(args[0], columns)
dasks = [a.dask for a in args if isinstance(a, _Frame)]
return return_type(merge(dsk, dsk2, *dasks), b, columns, [None, None])
aca = apply_concat_apply
def _get_return_type(arg, columns):
""" Get the class of the result
- When columns is str/unicode, the result is:
- Scalar when columns is ``return_scalar``
- Index if arg is Index
- Series otherwise
- Otherwise, result is DataFrame.
"""
if (isinstance(columns, (str, unicode)) or not
isinstance(columns, Iterable)):
if columns == return_scalar:
return Scalar
elif isinstance(arg, Index):
return Index
else:
return Series
else:
return DataFrame
def map_partitions(func, columns, *args, **kwargs):
""" Apply Python function on each DataFrame block
Parameters
----------
func : function
Function applied to each block
columns : tuple or string
Column names or name of the output
*args :
Target dask DataFrames / Series, followed by any extra arguments to ``func``
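Examples
--------
A minimal sketch; ``ddf`` and the column ``x`` are hypothetical:
>>> map_partitions(lambda df: df.x + 1, 'x', ddf) # doctest: +SKIP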
"""
assert callable(func)
token = kwargs.pop('token', 'map-partitions')
token_key = tokenize(token or func, columns, kwargs, *args)
name = '{0}-{1}'.format(token, token_key)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name)
args = _maybe_from_pandas(args)
if columns is no_default:
columns = None
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dfs = [df for df in args if isinstance(df, _Frame)]
return_type = _get_return_type(dfs[0], columns)
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (_rename, columns, (apply, func, (tuple, values),
kwargs))
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return return_type(merge(dsk, *dasks), name, columns, args[0].divisions)
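# Hedged usage sketch; ``ddf`` is assumed to be a dask DataFrame with a numeric
# column ``x``.  A scalar ``columns`` gives a Series, a sequence gives a
# DataFrame:
#
# >>> map_partitions(lambda df: df.x + 1, 'x', ddf)           # doctest: +SKIP
# >>> map_partitions(lambda df: df.head(), ddf.columns, ddf)  # doctest: +SKIP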
def _rename(columns, df):
""" Rename columns in dataframe or series """
if isinstance(columns, Iterator):
columns = list(columns)
if columns is no_default:
return df
if isinstance(df, pd.DataFrame) and len(columns) == len(df.columns):
return df.rename(columns=dict(zip(df.columns, columns)))
elif isinstance(df, pd.Series):
return pd.Series(df, name=columns)
else:
return df
def categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals, ordered=False)
return df
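# Minimal illustration on a single made-up pandas partition; the listed column
# becomes a Categorical with exactly the given categories:
#
# >>> block = pd.DataFrame({'fruit': ['apple', 'banana', 'apple']})
# >>> categorize_block(block, {'fruit': ['apple', 'banana']})  # doctest: +SKIP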
def categorize(df, columns=None, **kwargs):
"""
Convert columns of dataframe to category dtype
This aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = df.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [df[col].drop_duplicates() for col in columns]
values = compute(*distincts, **kwargs)
func = partial(categorize_block, categories=dict(zip(columns, values)))
return df.map_partitions(func, columns=df.columns)
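# Typical use, assuming ``ddf`` is a dask DataFrame with object-dtype columns;
# leaving ``columns=None`` converts every object column:
#
# >>> ddf2 = categorize(ddf, columns=['fruit'])  # doctest: +SKIP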
def quantile(df, q):
""" Approximate quantiles of Series / single column DataFrame
Parameters
----------
q : list/array of floats
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
assert len(df.columns) == 1
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(q, (list, tuple, np.ndarray)):
# make Series
merge_type = lambda v: df._partition_type(v, index=q, name=df.name)
return_type = df._constructor
if issubclass(return_type, Index):
return_type = Series
else:
merge_type = lambda v: df._partition_type(v).item()
return_type = df._constructor_sliced
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
return Series({(name, 0): pd.Series([], name=df.name)},
name, df.name, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): (merge_type, (merge_percentiles, qs, [qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, df.name, new_divisions)
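# Hedged sketch, assuming ``s`` is a dask Series: a list of fractions in [0, 1]
# yields a dask Series indexed by those fractions, while a single float yields
# a Scalar:
#
# >>> quantile(s, [0.25, 0.5, 0.75])  # doctest: +SKIP
# >>> quantile(s, 0.5)                # doctest: +SKIP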
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _resample_bin_and_out_divs(divisions, rule, closed, label):
rule = pd.datetools.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, how='count', closed=closed, label='left')
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (_loc, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (_loc, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (_loc, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
            # dummy slice to return an empty DataFrame or Series,
            # which retains the original data attributes (columns / name)
d[(out2, j - 1)] = (_loc, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, (list, tmp))
j += 1
return d
def repartition(df, divisions, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return df._constructor(merge(df.dask, dsk), out,
df.column_info, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return _Frame(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
class Accessor(object):
def __init__(self, series):
if not isinstance(series, Series):
raise ValueError('Accessor cannot be initialized')
self._series = series
def _property_map(self, key):
return map_partitions(self.getattr, self._series.name, self._series, key)
def _function_map(self, key, *args):
return map_partitions(self.call, self._series.name, self._series, key,
*args)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
dir(self.ns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(self.ns):
if isinstance(getattr(self.ns, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise
class DatetimeAccessor(Accessor):
""" Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.microsecond # doctest: +SKIP
"""
ns = pd.Series.dt
@staticmethod
def getattr(obj, attr):
return getattr(obj.dt, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.dt, attr)(*args)
class StringAccessor(Accessor):
""" Accessor object for string properties of the Series values.
Examples
--------
>>> s.str.lower() # doctest: +SKIP
"""
ns = pd.Series.str
@staticmethod
def getattr(obj, attr):
return getattr(obj.str, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.str, attr)(*args)
from __future__ import absolute_import, division, print_function
import bisect
from collections import Iterator
from datetime import datetime
from distutils.version import LooseVersion
import math
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, partial, first, partition, unique
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method
from ..utils import (repr_long_list, IndexCallable,
pseudorandom, derived_from, different_seeds, funcname)
from ..base import Base, compute, tokenize, normalize_token
from ..async import get_sync
no_default = '__no_default__'
return_scalar = '__return_scalar__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
return args[0].append(args[1:])
return args
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, _name, name=None, divisions=None):
self.dask = dsk
self._name = _name
self.divisions = [None, None]
# name and divisions are ignored.
# There are dummies to be compat with Series and DataFrame
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name)
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
return Scalar(merge(dsk, self.dask), name)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, a, b, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(a, b))
dsk = a.dask
if not isinstance(b, Base):
pass
elif isinstance(b, Scalar):
dsk = merge(dsk, b.dask)
b = (b._name, 0)
else:
return NotImplemented
if inv:
dsk.update({(name, 0): (op, b, (a._name, 0))})
else:
dsk.update({(name, 0): (op, (a._name, 0), b)})
if isinstance(b, (pd.Series, pd.DataFrame)):
return _Frame(dsk, name, b, [b.index.min(), b.index.max()])
else:
return Scalar(dsk, name)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
_name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
metadata: scalar, None, list, pandas.Series or pandas.DataFrame
metadata to specify data structure.
- If scalar or None is given, the result is Series.
- If list is given, the result is DataFrame.
- If pandas data is given, the result is the class corresponding to
pandas data.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __new__(cls, dsk, _name, metadata, divisions):
if (np.isscalar(metadata) or metadata is None or
isinstance(metadata, (Series, pd.Series))):
return Series(dsk, _name, metadata, divisions)
else:
return DataFrame(dsk, _name, metadata, divisions)
# constructor properties
# http://pandas.pydata.org/pandas-docs/stable/internals.html#override-constructor-properties
@property
def _constructor_sliced(self):
"""Constructor used when a result has one lower dimension(s) as the original"""
raise NotImplementedError
@property
def _constructor(self):
"""Constructor used when a result has the same dimension(s) as the original"""
raise NotImplementedError
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@classmethod
def _build_pd(cls, metadata):
""" build pandas instance from passed metadata """
if isinstance(metadata, cls):
# copy metadata
_pd = metadata._pd
known_dtype = metadata._known_dtype
elif isinstance(metadata, cls._partition_type):
if isinstance(metadata, pd.Index):
_pd = metadata[0:0]
else:
_pd = metadata.iloc[0:0]
known_dtype = True
else:
if np.isscalar(metadata) or metadata is None:
_pd = cls._partition_type([], name=metadata)
else:
_pd = cls._partition_type(columns=metadata)
known_dtype = False
return _pd, known_dtype
@property
def _args(self):
return NotImplementedError
def __getnewargs__(self):
""" To load pickle """
return self._args
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if self.known_divisions:
div_text = ', divisions=%s' % repr_long_list(self.divisions)
else:
div_text = ''
return ("dd.%s<%s, npartitions=%s%s>" %
(self.__class__.__name__, name, self.npartitions, div_text))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._pd.index.name, self.divisions)
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def get_division(self, n):
""" Get nth division of the data """
if 0 <= n < self.npartitions:
name = 'get-division-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n+2]
return self._constructor(merge(self.dask, dsk), name,
self._pd, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
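    # Quick sketch, assuming ``ddf`` is a dask DataFrame with several
    # partitions; the result is a new collection backed by just that partition:
    #
    # >>> first = ddf.get_division(0)  # doctest: +SKIP
    # >>> first.npartitions            # doctest: +SKIP
    # 1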
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return self._constructor(dsk2, name, self._pd, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, **kwargs):
assert all(k in ('keep', 'subset', 'take_last') for k in kwargs)
chunk = lambda s: s.drop_duplicates(**kwargs)
return aca(self, chunk=chunk, aggregate=chunk, columns=self._pd,
token='drop-duplicates')
def __len__(self):
return reduction(self, len, np.sum, token='len').compute()
def map_partitions(self, func, columns=no_default, *args, **kwargs):
""" Apply Python function on each DataFrame block
When using ``map_partitions`` you should provide either the column
names (if the result is a DataFrame) or the name of the Series (if the
result is a Series). The output type will be determined by the type of
``columns``.
Parameters
----------
func : function
Function applied to each blocks
columns : tuple or scalar
Column names or name of the output. Defaults to names of data itself.
When tuple is passed, DataFrame is returned. When scalar is passed,
Series is returned.
Examples
--------
When str is passed as columns, the result will be Series.
>>> df.map_partitions(lambda df: df.x + 1, columns='x') # doctest: +SKIP
        When a tuple is passed as columns, the result will be a DataFrame.
>>> df.map_partitions(lambda df: df.head(), columns=df.columns) # doctest: +SKIP
"""
return map_partitions(func, columns, self, *args, **kwargs)
def random_split(self, p, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
        p : list
            List of fractions (summing to 1) giving the relative size of each
            output piece.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
seeds = different_seeds(self.npartitions, random_state)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self._pd, self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=5, compute=True):
""" First n rows of the dataset
Caveat, this only checks the first n rows of the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (lambda x, n: x.head(n=n), (self._name, 0), n)}
result = self._constructor(merge(self.dask, dsk), name,
self._pd, self.divisions[:2])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
        Caveat, this only checks the last n rows of the last partition.
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (lambda x, n: x.tail(n=n),
(self._name, self.npartitions - 1), n)}
result = self._constructor(merge(self.dask, dsk), name,
self._pd, self.divisions[-2:])
if compute:
result = result.compute()
return result
def _loc(self, ind):
""" Helper function for the .loc accessor """
if isinstance(ind, Series):
return self._loc_series(ind)
if self.known_divisions:
if isinstance(ind, slice):
return self._loc_slice(ind)
else:
return self._loc_element(ind)
else:
return map_partitions(try_loc, self, self, ind)
def _loc_series(self, ind):
if not self.divisions == ind.divisions:
raise ValueError("Partitions of dataframe and index not the same")
return map_partitions(lambda df, ind: df.loc[ind],
self._pd, self, ind, token='loc-series')
def _loc_element(self, ind):
name = 'loc-%s' % tokenize(ind, self)
part = _partition_of_index_value(self.divisions, ind)
if ind < self.divisions[0] or ind > self.divisions[-1]:
raise KeyError('the label [%s] is not in the index' % str(ind))
dsk = {(name, 0): (lambda df: df.loc[ind:ind], (self._name, part))}
return self._constructor(merge(self.dask, dsk), name, self, [ind, ind])
def _loc_slice(self, ind):
name = 'loc-%s' % tokenize(ind, self)
assert ind.step in (None, 1)
if ind.start:
start = _partition_of_index_value(self.divisions, ind.start)
else:
start = 0
if ind.stop is not None:
stop = _partition_of_index_value(self.divisions, ind.stop)
else:
stop = self.npartitions - 1
istart = _coerce_loc_index(self.divisions, ind.start)
istop = _coerce_loc_index(self.divisions, ind.stop)
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
divisions = [istart, istop]
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
divisions = ((max(istart, self.divisions[start])
if ind.start is not None
else self.divisions[0],) +
self.divisions[start+1:stop+1] +
(min(istop, self.divisions[stop+1])
if ind.stop is not None
else self.divisions[-1],))
assert len(divisions) == len(dsk) + 1
return self._constructor(merge(self.dask, dsk), name,
self._pd, divisions)
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
return IndexCallable(self._loc)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list
List of partitions to be used
npartitions : int
Number of partitions of output, must be less than npartitions of
input
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
"""
if npartitions is not None:
if npartitions > self.npartitions:
raise ValueError("Can only repartition to fewer partitions")
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
@derived_from(pd.Series)
def fillna(self, value):
return self.map_partitions(self._partition_type.fillna, value=value)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
"""
if random_state is None:
random_state = np.random.randint(np.iinfo(np.int32).max)
name = 'sample-' + tokenize(self, frac, replace, random_state)
func = getattr(self._partition_type, 'sample')
seeds = different_seeds(self.npartitions, random_state)
dsk = dict(((name, i),
(apply, func, (tuple, [(self._name, i)]),
{'frac': frac, 'random_state': seed,
'replace': replace}))
for i, seed in zip(range(self.npartitions), seeds))
return self._constructor(merge(self.dask, dsk), name,
self._pd, self.divisions)
@derived_from(pd.DataFrame)
def to_hdf(self, path_or_buf, key, mode='a', append=False, complevel=0,
complib=None, fletcher32=False, get=get_sync, **kwargs):
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, complevel, complib,
fletcher32, get=get, **kwargs)
@derived_from(pd.DataFrame)
def to_csv(self, filename, get=get_sync, **kwargs):
from .io import to_csv
return to_csv(self, filename, get=get, **kwargs)
def to_imperative(self):
warnings.warn("Deprecation warning: moved to to_delayed")
return self.to_delayed()
def to_delayed(self):
""" Convert dataframe into dask Values
Returns a list of values, one value per partition.
"""
from ..delayed import Delayed
return [Delayed(k, [self.dask]) for k in self._keys()]
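    # Hedged sketch: each partition becomes one ``Delayed`` value, so the list
    # has ``npartitions`` entries (``ddf`` is an assumed dask DataFrame):
    #
    # >>> parts = ddf.to_delayed()        # doctest: +SKIP
    # >>> len(parts) == ddf.npartitions   # doctest: +SKIP
    # True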
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def _aca_agg(self, token, func, aggfunc=None, **kwargs):
""" Wrapper for aggregations """
raise NotImplementedError
def rolling(self, window, min_periods=None, win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window: int
Size of the moving window. This window must be smaller
than the size of the previous partition.
min_periods: int, default None
Minimum number of observations in window required to have
a value (otherwise result is NA).
win_type: string, default None
Provide a window type. (Identical to pandas.)
axis: int, default 0
Provide the axis to apply the function. (Identical to pandas.)
The center argument and deprecated freq argument are not supported.
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, {'window': window,
'min_periods': min_periods,
'axis': axis,
'win_type': win_type})
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_sum, None, self,
token=self._token_prefix + 'sum',
axis=axis, skipna=skipna)
else:
return self._aca_agg(token='sum', func=_sum,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_max, None, self,
token=self._token_prefix + 'max',
skipna=skipna, axis=axis)
else:
return self._aca_agg(token='max', func=_max,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_min, None, self,
token=self._token_prefix + 'min',
skipna=skipna, axis=axis)
else:
return self._aca_agg(token='min', func=_min,
skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def count(self, axis=None):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_count, None, self,
token=self._token_prefix + 'count',
axis=axis)
else:
return self._aca_agg(token='count', func=_count,
aggfunc=lambda x: x.sum())
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_mean, None, self,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna)
n = num.count()
def f(s, n):
try:
return s / n
except ZeroDivisionError:
return np.nan
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
return map_partitions(f, None, s, n, token=name)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_var, None, self,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna)
x2 = 1.0 * (num ** 2).sum(skipna=skipna)
n = num.count()
def f(x2, x, n):
try:
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.nan
name = self._token_prefix + 'var-%s' % tokenize(self, axis, skipna, ddof)
return map_partitions(f, None, x2, x, n, token=name)
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1):
axis = self._validate_axis(axis)
if axis == 1:
return map_partitions(_std, None, self,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof)
name = self._token_prefix + 'std-finish--%s' % tokenize(self, axis,
skipna, ddof)
return map_partitions(np.sqrt, None, v, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
"""
axis = self._validate_axis(axis)
name = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(pd.DataFrame.quantile, None, self,
q, axis, token=name)
else:
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[q.dask for q in quantiles])
qnames = [(q._name, 0) for q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(name, 0)] = (pd.Series, (list, qnames), num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, name, None, divisions)
else:
from .multi import _pdconcat
dask[(name, 0)] = (_pdconcat, (list, qnames), 1)
return DataFrame(dask, name, num.columns,
quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self):
name = 'describe--' + tokenize(self)
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(), num.mean(), num.std(), num.min(),
num.quantile([0.25, 0.5, 0.75]), num.max()]
stats_names = [(s._name, 0) for s in stats]
def build_partition(values):
assert len(values) == 6
count, mean, std, min, q, max = values
part1 = self._partition_type([count, mean, std, min],
index=['count', 'mean', 'std', 'min'])
q.index = ['25%', '50%', '75%']
part3 = self._partition_type([max], index=['max'])
return pd.concat([part1, q, part3])
dsk = dict()
dsk[(name, 0)] = (build_partition, (list, stats_names))
dsk = merge(dsk, num.dask, *[s.dask for s in stats])
return self._constructor(dsk, name, num._pd,
divisions=[None, None])
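    # Usage sketch, assuming ``ddf`` has numeric columns; the result is a single
    # partition holding the count/mean/std/min/quartile/max rows:
    #
    # >>> ddf.describe().compute()  # doctest: +SKIP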
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partitions
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self._pd, self,
token=name1, **chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
# cumlast must be a Series or Scalar
cumlast = map_partitions(_take_last, None, cumpart,
skipna, token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
            # aggregate cumulated partitions with their previous last elements
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return self._constructor(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._pd), self.divisions)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
cumsum = lambda x, **kwargs: x.cumsum(**kwargs)
return self._cum_agg('cumsum',
chunk=cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
cumprod = lambda x, **kwargs: x.cumprod(**kwargs)
return self._cum_agg('cumprod',
chunk=cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
def aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x > y else y
cummax = lambda x, **kwargs: x.cummax(**kwargs)
return self._cum_agg('cummax',
chunk=cummax,
aggregate=aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
def aggregate(x, y):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)
else: # scalar
return x if x < y else y
cummin = lambda x, **kwargs: x.cummin(**kwargs)
return self._cum_agg('cummin',
chunk=cummin,
aggregate=aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
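    # The cumulative methods above all route through ``_cum_agg``: each
    # partition is cumulated on its own and the last value of the preceding
    # partitions is then folded in with ``aggregate``.  Assuming a numeric dask
    # Series ``s``:
    #
    # >>> s.cumsum()               # doctest: +SKIP
    # >>> s.cummax(skipna=False)   # doctest: +SKIP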
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
        # cond and other may be dask instances;
        # map_partitions does not align keyword arguments, so pass them
        # positionally
return map_partitions(self._partition_type.where, no_default,
self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(self._partition_type.mask, no_default,
self, cond, other)
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override the method,
# wrap by pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
name: scalar or None
Series name. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
def __new__(cls, dsk, _name, name, divisions):
result = object.__new__(cls)
result.dask = dsk
result._name = _name
result._pd, result._known_dtype = cls._build_pd(name)
result.divisions = tuple(divisions)
return result
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def _constructor_sliced(self):
return Scalar
@property
def _constructor(self):
return Series
@property
def name(self):
return self._pd.name
@name.setter
def name(self, name):
self._pd.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
if self._known_dtype:
return self._pd.dtype
else:
self._pd, self._known_dtype = self._build_pd(self.head())
return self._pd.dtype
def __getattr__(self, key):
if key == 'cat':
# If unknown dtype, need to infer from head.
if not self._known_dtype:
self.dtype
return self._pd.cat
raise AttributeError("'Series' object has no attribute %r" % key)
@property
def column_info(self):
""" Return Series.name """
warnings.warn('column_info is deprecated, use name')
return self.name
@property
def nbytes(self):
return reduction(self, lambda s: s.nbytes, np.sum, token='nbytes')
def __array__(self, dtype=None, **kwargs):
x = np.array(self.compute())
if dtype and x.dtype != dtype:
x = x.astype(dtype)
return x
def __array_wrap__(self, array, context=None):
return pd.Series(array, name=self.name)
@cache_readonly
def dt(self):
return DatetimeAccessor(self)
@cache_readonly
def str(self):
return StringAccessor(self)
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
return quantile(self, q)
@derived_from(pd.Series)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
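    # Boolean indexing works when the key shares this Series' divisions, the
    # usual case for a mask derived from the same Series (``s`` is an assumed
    # dask Series):
    #
    # >>> s[s > 0]  # doctest: +SKIP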
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_division(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
def _aca_agg(self, token, func, aggfunc=None, **kwargs):
""" Wrapper for aggregations """
if aggfunc is None:
aggfunc = func
return aca([self], chunk=func,
aggregate=lambda x, **kwargs: aggfunc(pd.Series(x), **kwargs),
columns=return_scalar, token=self._token_prefix + token,
**kwargs)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, index, **kwargs)
@derived_from(pd.Series)
def sum(self, axis=None, skipna=True):
return super(Series, self).sum(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def max(self, axis=None, skipna=True):
return super(Series, self).max(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def min(self, axis=None, skipna=True):
return super(Series, self).min(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def count(self):
return super(Series, self).count()
@derived_from(pd.Series)
def mean(self, axis=None, skipna=True):
return super(Series, self).mean(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def var(self, axis=None, ddof=1, skipna=True):
return super(Series, self).var(axis=axis, ddof=ddof, skipna=skipna)
@derived_from(pd.Series)
def std(self, axis=None, ddof=1, skipna=True):
return super(Series, self).std(axis=axis, ddof=ddof, skipna=skipna)
@derived_from(pd.Series)
def cumsum(self, axis=None, skipna=True):
return super(Series, self).cumsum(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cumprod(self, axis=None, skipna=True):
return super(Series, self).cumprod(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cummax(self, axis=None, skipna=True):
return super(Series, self).cummax(axis=axis, skipna=skipna)
@derived_from(pd.Series)
def cummin(self, axis=None, skipna=True):
return super(Series, self).cummin(axis=axis, skipna=skipna)
def unique(self):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
"""
# unique returns np.ndarray, it must be wrapped
name = self.name
chunk = lambda x: pd.Series(pd.Series.unique(x), name=name)
return aca(self, chunk=chunk, aggregate=chunk,
columns=name, token='unique')
@derived_from(pd.Series)
def nunique(self):
return self.drop_duplicates().count()
@derived_from(pd.Series)
def value_counts(self):
chunk = lambda s: s.value_counts()
if LooseVersion(pd.__version__) > '0.16.2':
agg = lambda s: s.groupby(level=0).sum().sort_values(ascending=False)
else:
agg = lambda s: s.groupby(level=0).sum().sort(inplace=False, ascending=False)
return aca(self, chunk=chunk, aggregate=agg, columns=self.name,
token='value-counts')
@derived_from(pd.Series)
def nlargest(self, n=5):
return nlargest(self, n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(pd.Series.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (pd.Series.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
return Series(dsk, name, self.name, self.divisions)
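    # Hedged examples, assuming ``s`` is a dask Series; ``arg`` may be a dict,
    # a pandas Series or a callable, as in pandas:
    #
    # >>> s.map({'a': 1, 'b': 2})                     # doctest: +SKIP
    # >>> s.map(lambda x: x * 2, na_action='ignore')  # doctest: +SKIP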
@derived_from(pd.Series)
def astype(self, dtype):
return map_partitions(pd.Series.astype, self.name, self, dtype=dtype)
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(pd.Series.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(pd.Series.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None):
return self.map_partitions(pd.Series.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def notnull(self):
return self.map_partitions(pd.Series.notnull)
@derived_from(pd.Series)
def isnull(self):
return self.map_partitions(pd.Series.isnull)
def to_bag(self, index=False):
"""Convert to a dask Bag.
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
"""
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return map_partitions(pd.Series.to_frame, self._pd.to_frame(name), self, name)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
            if level is not None:
raise NotImplementedError('level must be None')
return map_partitions(op, self._pd, self, other,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
def apply(self, func, convert_dtype=True, name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
This mimics the pandas version except for the following:
1. The user should provide output name.
Parameters
----------
func: function
Function to apply
convert_dtype: boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object
        name: list, scalar or None, optional
            If a list is given, the result is a DataFrame whose columns are the
            specified list. Otherwise, the result is a Series whose name is the
            given scalar or None (no name). If the name keyword is not given,
            dask tries to infer the result type from the beginning of the data.
            This inference may take some time and lead to unexpected results.
args: tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame depending on name keyword
"""
if name is no_default:
msg = ("name is not specified, inferred from partial data. "
"Please provide name if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, name=['x', 'y']) for dataframe result\n"
" or: .apply(func, name='x') for series result")
warnings.warn(msg)
name = _emulate(pd.Series.apply, self.head(), func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(pd.Series.apply, name, self, func,
convert_dtype, args, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True)
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
@property
def _constructor(self):
return Index
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (lambda x, n: x[:n], (self._name, 0), n)}
result = self._constructor(merge(self.dask, dsk), name,
self._pd, self.divisions[:2])
if compute:
result = result.compute()
return result
def nunique(self):
return self.drop_duplicates().count()
@derived_from(pd.Index)
def max(self):
# it doesn't support axis and skipna kwds
return self._aca_agg(token='max', func=_max)
@derived_from(pd.Index)
def min(self):
return self._aca_agg(token='min', func=_min)
def count(self):
f = lambda x: pd.notnull(x).sum()
return reduction(self, f, np.sum, token='index-count')
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of str
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
def __new__(cls, dask, name, columns, divisions):
result = object.__new__(cls)
result.dask = dask
result._name = name
result._pd, result._known_dtype = cls._build_pd(columns)
result.divisions = tuple(divisions)
return result
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
@property
def _constructor_sliced(self):
return Series
@property
def _constructor(self):
return DataFrame
@property
def columns(self):
return self._pd.columns
@columns.setter
def columns(self, columns):
# if length mismatches, error is raised from pandas
self._pd.columns = columns
renamed = _rename_dask(self, columns)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key):
# error is raised from pandas
dummy = self._pd[_extract_pd(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return self._constructor_sliced(merge(self.dask, dsk), name,
dummy, self.divisions)
if isinstance(key, list):
# error is raised from pandas
dummy = self._pd[_extract_pd(key)]
dsk = dict(((name, i), (operator.getitem,
(self._name, i), (list, key)))
for i in range(self.npartitions))
return self._constructor(merge(self.dask, dsk), name,
dummy, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = dict(((name, i), (self._partition_type._getitem_array,
(self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return self._constructor(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.columns))))
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
if self._known_dtype:
return self._pd.dtypes
else:
self._pd, self._known_dtype = self._build_pd(self.head())
return self._pd.dtypes
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(pd.DataFrame.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(pd.DataFrame.isnull)
def set_index(self, other, drop=True, sorted=False, **kwargs):
""" Set the DataFrame index (row labels) using an existing column
This operation in dask.dataframe is expensive. If the input column is
sorted then we accomplish the set_index in a single full read of that
column. However, if the input column is not sorted then this operation
triggers a full shuffle, which can take a while and only works on a
single machine (not distributed).
Parameters
----------
other: Series or label
drop: boolean, default True
Delete columns to be used as the new index
sorted: boolean, default False
Set to True if the new index column is already sorted
Examples
--------
>>> df.set_index('x') # doctest: +SKIP
>>> df.set_index(d.x) # doctest: +SKIP
>>> df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
"""
if sorted:
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
from .shuffle import set_partition
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
""" Return DataFrame.columns """
warnings.warn('column_info is deprecated, use columns')
return self.columns
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None):
return nlargest(self, n, columns)
@derived_from(pd.DataFrame)
def reset_index(self):
out = self.map_partitions(self._partition_type.reset_index)
out.divisions = [None] * (self.npartitions + 1)
return out
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
from dask.dataframe.categorical import categorize
return categorize(self, columns, **kwargs)
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._pd.assign(**_extract_pd(kwargs))
return elemwise(_assign, self, *pairs, columns=df2)
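    # Usage sketch, assuming ``df`` is a dask DataFrame with numeric columns
    # ``x`` and ``y``; the new column may be derived from existing dask Series:
    #
    # >>> df.assign(z=df.x + df.y)  # doctest: +SKIP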
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return map_partitions(pd.DataFrame.rename, no_default, self,
None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + pd.DataFrame.query.__doc__
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, pd.DataFrame.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (pd.DataFrame.query, (self._name, i), expr))
for i in range(self.npartitions))
dummy = self._pd.query(expr, **kwargs)
return self._constructor(merge(dsk, self.dask), name,
dummy, self.divisions)
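    # Usage sketch, assuming ``df`` has a numeric column ``x``; any extra
    # keyword arguments are forwarded to ``pandas.DataFrame.query`` on every
    # partition:
    #
    # >>> df.query('x > 2')  # doctest: +SKIP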
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._pd.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(_eval, meta, expr, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
# for cloudpickle
def f(df, how=how, subset=subset):
return df.dropna(how=how, subset=subset)
return self.map_partitions(f, how=how, subset=subset)
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute, get=get)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
def _get_numeric_data(self, how='any', subset=None):
# If unknown dtype, need to infer from head.
if not self._known_dtype:
self.dtypes
# calculate columns to avoid unnecessary calculation
numerics = self._pd._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return map_partitions(pd.DataFrame._get_numeric_data,
numerics, self, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _aca_agg(self, token, func, aggfunc=None, **kwargs):
""" Wrapper for aggregations """
if aggfunc is None:
aggfunc = func
def aggregate(x, **kwargs):
return x.groupby(level=0).apply(aggfunc, **kwargs)
        # groupby.aggregation doesn't support skipna;
        # using groupby.apply(aggfunc) is a workaround to handle each group as a df
return aca([self], chunk=func, aggregate=aggregate,
columns=None, token=self._token_prefix + token,
**kwargs)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0):
if axis != 1:
raise NotImplementedError("Drop currently only works for axis=1")
return elemwise(pd.DataFrame.drop, self, labels, axis)
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
suffixes=('_x', '_y'), npartitions=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, npartitions=npartitions)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
            msg = ('Unable to append dd.Series to dd.DataFrame. '
                   'Use pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_division(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_division(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis == 1:
# when axis=1, series will be added to each row
                # it is not supported for dd.Series.
# dd.DataFrame is not affected as op is applied elemwise
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
dummy = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, dummy, self, other,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
def apply(self, func, axis=0, args=(), columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. The user must specify axis=1 explicitly.
2. The user should provide output columns.
Parameters
----------
func: function
Function to apply to each column
axis: {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
columns: list, scalar or None
            If a list is given, the result is a DataFrame whose columns are
            the specified list. Otherwise, the result is a Series whose name
            is the given scalar or None (no name). If the columns keyword is
            not given, dask tries to infer the result type from a small sample
            of the data; this inference may take some time and lead to
            unexpected results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
        applied : Series or DataFrame depending on the columns keyword
"""
axis = self._validate_axis(axis)
if axis == 0:
raise NotImplementedError(
"dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
if columns is no_default:
msg = ("columns is not specified, inferred from partial data. "
"Please provide columns if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, columns=['x', 'y']) for dataframe result\n"
" or: .apply(func, columns='x') for series result")
warnings.warn(msg)
columns = _emulate(pd.DataFrame.apply, self.head(), func,
axis=axis, args=args, **kwds)
return map_partitions(pd.DataFrame.apply, columns, self, func, axis,
False, False, None, args, **kwds)
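    # Illustrative sketch (not from the original docstring); assumes ``ddf``
    # is a dd.DataFrame with columns ``x`` and ``y``.  Passing ``columns``
    # explicitly avoids the inference warning above:
    #
    #   >>> ddf.apply(lambda row: row.x + row.y,
    #   ...           axis=1, columns='total')         # doctest: +SKIP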
@derived_from(pd.DataFrame)
def cov(self, min_periods=None):
return cov_corr(self, min_periods)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True)
@derived_from(pd.DataFrame)
def astype(self, dtype):
empty = self._pd.astype(dtype)
return map_partitions(pd.DataFrame.astype, empty, self, dtype=dtype)
def info(self):
"""
Concise summary of a Dask DataFrame.
"""
lines = list()
lines.append(str(type(self)))
lines.append('Data columns (total %d columns):' % len(self.columns))
dtypes = self.dtypes
space = max([len(k) for k in self.columns]) + 4
template = "%s%s"
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
lines.append(template % (('%s' % col)[:space].ljust(space), dtype))
print('\n'.join(lines))
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
def elemwise_property(attr, s):
return map_partitions(getattr, s.name, s, attr)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def nlargest(df, n=5, columns=None):
if isinstance(df, Index):
raise AttributeError("nlargest is not available for Index objects")
elif isinstance(df, Series):
token = 'series-nlargest-n={0}'.format(n)
f = lambda s: s.nlargest(n)
elif isinstance(df, DataFrame):
token = 'dataframe-nlargest-n={0}'.format(n)
f = lambda df: df.nlargest(n, columns)
columns = df.columns # this is a hack.
return aca(df, f, f, columns=columns, token=token)
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[start:stop]
if not include_right_boundary:
right_index = result.index.get_slice_bound(stop, 'left', 'loc')
result = result.iloc[:right_index]
return result
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.pop('columns', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
if other:
dsk = dict(((_name, i),
(apply, partial_by_order, list(frs),
{'function': op, 'other': other}))
for i, frs in enumerate(zip(*keys)))
else:
dsk = dict(((_name, i), (op,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if columns is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
columns = _emulate(op, *args, **kwargs)
return _Frame(dsk, _name, columns, divisions)
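# Illustrative sketch (assumes a dd.DataFrame ``df``): elemwise is the
# building block behind the bound arithmetic operators, so ``df + 1`` is
# roughly equivalent to:
#
#   >>> elemwise(operator.add, df, 1)   # doctest: +SKIP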
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]]
def empty_safe(func, arg):
"""
>>> empty_safe(sum, [1, 2, 3])
6
>>> empty_safe(sum, [])
('empty', 0)
"""
if len(arg) == 0:
return ('empty', func(arg))
else:
return func(arg)
def reduction(x, chunk, aggregate, token=None):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
token_key = tokenize(x, token or (chunk, aggregate))
token = token or 'reduction'
a = '{0}--chunk-{1}'.format(token, token_key)
dsk = dict(((a, i), (empty_safe, chunk, (x._name, i)))
for i in range(x.npartitions))
b = '{0}--aggregation-{1}'.format(token, token_key)
dsk2 = {(b, 0): (aggregate, (remove_empties,
[(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
def apply_concat_apply(args, chunk=None, aggregate=None,
columns=no_default, token=None, chunk_kwargs=None,
aggregate_kwargs=None, **kwargs):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args if isinstance(arg, _Frame))
token_key = tokenize(token or (chunk, aggregate), columns, *args)
token = token or 'apply-concat-apply'
a = '{0}--first-{1}'.format(token, token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = dict(((a, i), (chunk, key))
for i, key in enumerate(args[0]._keys()))
else:
        dsk = dict(((a, i), (apply, chunk, [(x._name, i)
                                            if isinstance(x, _Frame)
                                            else x for x in args],
                             chunk_kwargs))
                   for i in range(args[0].npartitions))
b = '{0}--second-{1}'.format(token, token_key)
    conc = (_concat, (list, [(a, i) for i in range(args[0].npartitions)]))
if not aggregate_kwargs:
dsk2 = {(b, 0): (aggregate, conc)}
else:
dsk2 = {(b, 0): (apply, aggregate, [conc], aggregate_kwargs)}
if columns is no_default:
return_type = type(args[0])
columns = None
else:
return_type = _get_return_type(args[0], columns)
dasks = [a.dask for a in args if isinstance(a, _Frame)]
return return_type(merge(dsk, dsk2, *dasks), b, columns, [None, None])
aca = apply_concat_apply
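# Illustrative sketch (assumes a dd.DataFrame ``df``): a whole-frame row count
# expressed with the chunk/aggregate pattern above --
#
#   >>> aca([df], chunk=len, aggregate=sum,
#   ...     columns=return_scalar).compute()    # doctest: +SKIP
#
# len runs once per partition and sum combines the per-partition counts into
# a single Scalar result.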
def _get_return_type(arg, metadata):
""" Get the class of the result
- When metadata is str/unicode, the result is:
- Scalar when columns is ``return_scalar``
- Index if arg is Index
- Series otherwise
- Otherwise, result is DataFrame.
"""
if isinstance(metadata, _Frame):
metadata = metadata._pd
if isinstance(metadata, pd.Series):
return Series
elif isinstance(metadata, pd.DataFrame):
return DataFrame
elif isinstance(metadata, pd.Index) and isinstance(arg, Index):
# DataFrame may pass df.columns (Index)
# thus needs to check arg
return Index
# legacy logic, required to handle user input
if np.isscalar(metadata) or metadata is None:
if metadata == return_scalar:
return Scalar
elif isinstance(arg, Index):
return Index
else:
return Series
else:
return DataFrame
def _extract_pd(x):
"""
Extract internal cache data (``_pd``) from dd.DataFrame / dd.Series
"""
if isinstance(x, _Frame):
return x._pd
elif isinstance(x, list):
return [_extract_pd(_x) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_pd(_x) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_pd(x[k])
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_pd``) for calculation
"""
return func(*_extract_pd(args), **_extract_pd(kwargs))
def map_partitions(func, metadata, *args, **kwargs):
""" Apply Python function on each DataFrame block
Parameters
----------
    func : callable
        Function applied to each block
    metadata : _Frame, columns or name
        Metadata for the output; pass ``no_default`` to infer it by running
        ``func`` on the internal cached data
    args : dd.DataFrame, dd.Series, Scalar or other values
        Targets of ``func``; remaining keyword arguments are forwarded to it
"""
metadata = _extract_pd(metadata)
assert callable(func)
token = kwargs.pop('token', None)
name = token or funcname(func)
name = '{0}-{1}'.format(name, tokenize(metadata, *args, **kwargs))
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dfs = [df for df in args if isinstance(df, _Frame)]
if metadata is no_default:
        # metadata not specified: infer it by emulating func on the cached _pd data
try:
metadata = _emulate(func, *args, **kwargs)
except Exception:
# user function may fail
metadata = None
if isinstance(metadata, pd.DataFrame):
columns = metadata.columns
elif isinstance(metadata, pd.Series):
columns = metadata.name
else:
columns = metadata
return_type = _get_return_type(dfs[0], metadata)
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
values = (apply, func, (tuple, values), kwargs)
dsk[(name, i)] = (_rename, columns, values)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return return_type(merge(dsk, *dasks), name, metadata, args[0].divisions)
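# Illustrative sketch (assumes a dd.DataFrame ``ddf`` with a float column
# ``x``): supplying metadata explicitly skips the sample-based inference --
#
#   >>> meta = pd.Series([], name='x', dtype=float)        # doctest: +SKIP
#   >>> map_partitions(lambda df: df.x * 2, meta, ddf)     # doctest: +SKIP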
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
"""
assert not isinstance(df, _Frame)
if isinstance(columns, Iterator):
columns = list(columns)
if columns is no_default:
return df
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
columns = pd.Index(columns)
if len(columns) == len(df.columns):
if columns.equals(df.columns):
# if target is identical, rename is not necessary
return df
        # each function must be a pure op, do not use df.columns = columns
return df.rename(columns=dict(zip(df.columns, columns)))
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
        if df.name == columns:
return df
return pd.Series(df, name=columns)
    # map_partitions may pass other types
return df
def _rename_dask(df, metadata):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
    Internally used to overwrite dd.DataFrame.columns and dd.Series.name
    We can't use map_partitions because it applies the function and then renames
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
metadata : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
"""
assert isinstance(df, _Frame)
metadata, _ = df._build_pd(metadata)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return _Frame(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
""" Approximate quantiles of Series / single column DataFrame
Parameters
----------
q : list/array of floats
        Iterable of fractions ranging from 0 to 1 for the desired quantiles
"""
assert (isinstance(df, DataFrame) and len(df.columns) == 1 or
isinstance(df, Series))
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(q, (list, tuple, np.ndarray)):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
merge_type = lambda v: pd.Series(v, index=q, name=df_name)
return_type = df._constructor
if issubclass(return_type, Index):
return_type = Series
else:
typ = df._partition_type
merge_type = lambda v: typ(v).item()
return_type = df._constructor_sliced
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df.name, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): (merge_type, (merge_percentiles, qs, [qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, df.name, new_divisions)
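# Illustrative sketch (assumes a dd.Series ``s``): quantiles are requested as
# fractions in [0, 1] (pandas convention) and rescaled to percentiles
# internally --
#
#   >>> quantile(s, [0.25, 0.5, 0.75]).compute()   # doctest: +SKIP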
def cov_corr(df, min_periods=None, corr=False, scalar=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
prefix = 'corr' if corr else 'cov'
df = df._get_numeric_data()
name = '{0}-agg-{1}'.format(prefix, tokenize(df, min_periods, scalar))
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
k = '{0}-chunk-{1}'.format(prefix, df._name)
dsk = dict(((k, i), (cov_corr_chunk, f, corr))
for (i, f) in enumerate(df._keys()))
dsk[(name, 0)] = (cov_corr_agg, list(dsk.keys()), df._pd, min_periods,
corr, scalar)
dsk = merge(df.dask, dsk)
if scalar:
return Scalar(dsk, name)
return DataFrame(dsk, name, df._pd, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums/np.where(counts, counts, np.nan))**2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_agg(data, meta, min_periods=2, corr=False, scalar=False):
"""Aggregation part of a covariance or correlation computation"""
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2/n2) - (s1/n1)
C = (np.nansum((n1 * n2)/(n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
C[cum_counts[-1] < min_periods] = np.nan
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
if corr:
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m2 = np.nansum(data['m'] + counts*(sums/counts_na - mu)**2, axis=0)
den = np.sqrt(m2 * m2.T)
else:
den = nobs - 1
mat = C/den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=meta.columns, index=meta.columns)
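# Note on the aggregation above (standard pairwise-combination identity, not
# from the original source): two chunks with sums s1, s2, counts n1, n2 and
# co-moment matrices C1, C2 combine as
#
#     C = C1 + C2 + (n1 * n2) / (n1 + n2) * d * d.T,   where d = s2/n2 - s1/n1
#
# The result is then divided by (n - 1) for the covariance, or by the product
# of per-column standard deviations for the Pearson correlation.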
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
    Take the last row (Series) of a DataFrame / the last value of a Series,
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
        # in each column
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
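# Illustrative sketch (hypothetical input): with a trailing NaN the groupby
# trick above keeps the last *valid* value per column --
#
#   >>> _take_last(pd.DataFrame({'a': [1.0, np.nan],
#   ...                          'b': [3.0, 4.0]}))    # doctest: +SKIP
#
# would return a Series with a == 1.0 and b == 4.0.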
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (_loc, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (_loc, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (_loc, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (_loc, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, (list, tmp))
j += 1
return d
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
npartitions = min(npartitions, df.npartitions)
k = int(math.ceil(df.npartitions / npartitions))
divisions = df.divisions[::k]
if len(divisions) <= npartitions:
divisions = divisions + (df.divisions[-1],)
return df.repartition(divisions=divisions)
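# Worked example (not from the original source): for divisions
# (0, 10, 20, 30, 40) -- i.e. 4 partitions -- asking for npartitions=2 gives
# k = 2 and new divisions (0, 20, 40), keeping every second boundary.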
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return df._constructor(merge(df.dask, dsk), out,
df._pd, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return _Frame(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
class Accessor(object):
def __init__(self, series):
if not isinstance(series, Series):
raise ValueError('Accessor cannot be initialized')
self._series = series
def _property_map(self, key):
return map_partitions(self.getattr, self._series.name, self._series, key)
def _function_map(self, key, *args):
return map_partitions(self.call, self._series.name, self._series, key,
*args)
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
dir(self.ns)))
def __getattr__(self, key):
if key in dir(self.ns):
if isinstance(getattr(self.ns, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise AttributeError(key)
class DatetimeAccessor(Accessor):
""" Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.microsecond # doctest: +SKIP
"""
ns = pd.Series.dt
@staticmethod
def getattr(obj, attr):
return getattr(obj.dt, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.dt, attr)(*args)
class StringAccessor(Accessor):
""" Accessor object for string properties of the Series values.
Examples
--------
>>> s.str.lower() # doctest: +SKIP
"""
ns = pd.Series.str
@staticmethod
def getattr(obj, attr):
return getattr(obj.str, attr)
@staticmethod
def call(obj, attr, *args):
return getattr(obj.str, attr)(*args)
def try_loc(df, ind):
try:
return df.loc[ind]
except KeyError:
return df.head(0)
def set_sorted_index(df, index, drop=True, **kwargs):
if not isinstance(index, Series):
index2 = df[index]
meta = df._pd.set_index(index, drop=drop)
else:
index2 = index
meta = df._pd.set_index(index._pd, drop=drop)
mins = index2.map_partitions(pd.Series.min)
maxes = index2.map_partitions(pd.Series.max)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a >= b for a, b in zip(mins, maxes))):
raise ValueError("Column not properly sorted", mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
result = map_partitions(_set_sorted_index, meta, df, index, drop=drop)
result.divisions = divisions
return result
def _set_sorted_index(df, idx, drop):
return df.set_index(idx, drop=drop)
def _eval(df, expr, **kwargs):
return df.eval(expr, **kwargs)
def _sum(x, **kwargs):
return x.sum(**kwargs)
def _min(x, **kwargs):
return x.min(**kwargs)
def _max(x, **kwargs):
return x.max(**kwargs)
def _count(x, **kwargs):
return x.count(**kwargs)
def _mean(x, **kwargs):
return x.mean(**kwargs)
def _var(x, **kwargs):
return x.var(**kwargs)
def _std(x, **kwargs):
return x.std(**kwargs)
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/core.py",
"copies": "1",
"size": "96459",
"license": "bsd-3-clause",
"hash": -1757877171288609500,
"line_mean": 33.25390625,
"line_max": 96,
"alpha_frac": 0.562695031,
"autogenerated": false,
"ratio": 3.8807129063405212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9941048347812237,
"avg_score": 0.00047191790565683195,
"num_lines": 2816
} |
from __future__ import absolute_import, division, print_function
import bitarray
import numpy as np
from .hashfunctions import generate_hashfunctions
class BloomFilter(object):
"""Basic Bloom Filter."""
def __init__(self, capacity, error_rate):
self.error_rate = error_rate
self.capacity = capacity
self.nbr_slices = int(np.ceil(np.log2(1.0 / error_rate)))
self.bits_per_slice = int(np.ceil((capacity * abs(np.log(error_rate))) / (self.nbr_slices * (np.log(2) ** 2))))
self.nbr_bits = self.nbr_slices * self.bits_per_slice
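        # Sizing note (standard partitioned Bloom filter arithmetic, added for
        # clarity): k = ceil(log2(1 / error_rate)) hash slices and
        # bits_per_slice = ceil(capacity * |ln(error_rate)| / (k * ln(2)**2)),
        # so the false-positive rate stays near ``error_rate`` once
        # ``capacity`` items have been inserted.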
self.initialize_bitarray()
self.count = 0
self.hashes = generate_hashfunctions(self.bits_per_slice, self.nbr_slices)
self.hashed_values = []
def initialize_bitarray(self):
self.bitarray = bitarray.bitarray(self.nbr_bits)
self.bitarray.setall(False)
def __contains__(self, key):
self.hashed_values = self.hashes(key)
offset = 0
for value in self.hashed_values:
if not self.bitarray[offset + value]:
return False
offset += self.bits_per_slice
return True
def add(self, key):
if key in self:
return True
offset = 0
if not self.hashed_values:
self.hashed_values = self.hashes(key)
for value in self.hashed_values:
self.bitarray[offset + value] = True
offset += self.bits_per_slice
self.count += 1
return False
if __name__ == "__main__":
import numpy as np
bf = BloomFilter(10000, 0.01)
random_items = [str(r) for r in np.random.randn(20000)]
for item in random_items[:10000]:
bf.add(item)
false_positive = 0
for item in random_items[10000:20000]:
if item in bf:
false_positive += 1
print("Error rate (false positive): %s" % str(float(false_positive) / 10000))
| {
"repo_name": "Parsely/probably",
"path": "probably/bloomfilter.py",
"copies": "1",
"size": "1924",
"license": "mit",
"hash": 1820114288714089500,
"line_mean": 30.0322580645,
"line_max": 119,
"alpha_frac": 0.6003118503,
"autogenerated": false,
"ratio": 3.650853889943074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9745551733380762,
"avg_score": 0.0011228013724624872,
"num_lines": 62
} |
from __future__ import absolute_import, division, print_function
import blaze
from blaze.datadescriptor import dd_as_py
import numpy as np
import unittest
from blaze.py2help import skip
from blaze.tests.common import MayBeUriTest
class getitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
self.assertEqual(dd_as_py(a[0]._data), 0)
    @skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
print("a:", a, self.caps)
self.assertEqual(dd_as_py(a[0:2]._data), [0,1])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
self.assertEqual(dd_as_py(a[1]._data), [3,4,5])
class getitem_blz(getitem):
caps={'compress': True}
class setitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0] = 1
self.assertEqual(dd_as_py(a[0]._data), 1)
@skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0:2] = 2
self.assertEqual(dd_as_py(a[0:2]._data), [2,2])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
a[1] = 2
self.assertEqual(dd_as_py(a[1]._data), [2,2,2])
# BLZ is going to be read and append only for the time being
# class setitem_blz(setitem):
# caps={'compress': True}
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "zzmjohn/blaze",
"path": "blaze/tests/test_get_set.py",
"copies": "7",
"size": "1665",
"license": "bsd-3-clause",
"hash": -4256940324492930000,
"line_mean": 29.2727272727,
"line_max": 68,
"alpha_frac": 0.6252252252,
"autogenerated": false,
"ratio": 2.9261862917398944,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7051411516939895,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import bz2
import gzip
import os
import os.path
import pickle
import posixpath
import sys
import tempfile
from contextlib import contextmanager
from distutils.version import LooseVersion
from pathlib import Path
from unittest.mock import patch
import pytest
import fsspec
from fsspec import compression
from fsspec.core import OpenFile, get_fs_token_paths, open_files
from fsspec.implementations.local import LocalFileSystem, make_path_posix
from fsspec.tests.test_utils import WIN
files = {
".test.accounts.1.json": (
b'{"amount": 100, "name": "Alice"}\n'
b'{"amount": 200, "name": "Bob"}\n'
b'{"amount": 300, "name": "Charlie"}\n'
b'{"amount": 400, "name": "Dennis"}\n'
),
".test.accounts.2.json": (
b'{"amount": 500, "name": "Alice"}\n'
b'{"amount": 600, "name": "Bob"}\n'
b'{"amount": 700, "name": "Charlie"}\n'
b'{"amount": 800, "name": "Dennis"}\n'
),
}
csv_files = {
".test.fakedata.1.csv": (b"a,b\n" b"1,2\n"),
".test.fakedata.2.csv": (b"a,b\n" b"3,4\n"),
}
odir = os.getcwd()
@contextmanager
def filetexts(d, open=open, mode="t"):
"""Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
for filename, text in d.items():
f = open(filename, "w" + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
try:
os.remove(filename)
except (IOError, OSError):
pass
finally:
os.chdir(odir)
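# Illustrative sketch (hypothetical filenames): typical use of the helper
# above inside a test body --
#
#   with filetexts({"data.csv": b"a,b\n1,2\n"}, mode="b") as names:
#       assert names == ["data.csv"]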
def test_urlpath_inference_strips_protocol(tmpdir):
tmpdir = make_path_posix(str(tmpdir))
paths = ["/".join([tmpdir, "test.%02d.csv" % i]) for i in range(20)]
for path in paths:
with open(path, "wb") as f:
f.write(b"1,2,3\n" * 10)
# globstring
protocol = "file:///" if sys.platform == "win32" else "file://"
urlpath = protocol + os.path.join(tmpdir, "test.*.csv")
_, _, paths2 = get_fs_token_paths(urlpath)
assert paths2 == paths
# list of paths
_, _, paths2 = get_fs_token_paths([protocol + p for p in paths])
assert paths2 == paths
def test_urlpath_inference_errors():
# Empty list
with pytest.raises(ValueError) as err:
get_fs_token_paths([])
assert "empty" in str(err.value)
# Protocols differ
with pytest.raises(ValueError) as err:
get_fs_token_paths(["s3://test/path.csv", "/other/path.csv"])
assert "protocol" in str(err.value)
def test_urlpath_expand_read():
"""Make sure * is expanded in file paths when reading."""
# when reading, globs should be expanded to read files by mask
with filetexts(csv_files, mode="b"):
_, _, paths = get_fs_token_paths("./.*.csv")
assert len(paths) == 2
_, _, paths = get_fs_token_paths(["./.*.csv"])
assert len(paths) == 2
def test_cats():
with filetexts(csv_files, mode="b"):
fs = fsspec.filesystem("file")
assert fs.cat(".test.fakedata.1.csv") == b"a,b\n" b"1,2\n"
out = set(fs.cat([".test.fakedata.1.csv", ".test.fakedata.2.csv"]).values())
assert out == {b"a,b\n" b"1,2\n", b"a,b\n" b"3,4\n"}
assert fs.cat(".test.fakedata.1.csv", None, None) == b"a,b\n" b"1,2\n"
assert fs.cat(".test.fakedata.1.csv", start=1, end=6) == b"a,b\n" b"1,2\n"[1:6]
assert fs.cat(".test.fakedata.1.csv", start=-1) == b"a,b\n" b"1,2\n"[-1:]
assert (
fs.cat(".test.fakedata.1.csv", start=1, end=-2) == b"a,b\n" b"1,2\n"[1:-2]
)
out = set(
fs.cat(
[".test.fakedata.1.csv", ".test.fakedata.2.csv"], start=1, end=-1
).values()
)
assert out == {b"a,b\n" b"1,2\n"[1:-1], b"a,b\n" b"3,4\n"[1:-1]}
def test_urlpath_expand_write():
"""Make sure * is expanded in file paths when writing."""
_, _, paths = get_fs_token_paths("prefix-*.csv", mode="wb", num=2)
assert all(
[p.endswith(pa) for p, pa in zip(paths, ["/prefix-0.csv", "/prefix-1.csv"])]
)
_, _, paths = get_fs_token_paths(["prefix-*.csv"], mode="wb", num=2)
assert all(
[p.endswith(pa) for p, pa in zip(paths, ["/prefix-0.csv", "/prefix-1.csv"])]
)
# we can read with multiple masks, but not write
with pytest.raises(ValueError):
_, _, paths = get_fs_token_paths(
["prefix1-*.csv", "prefix2-*.csv"], mode="wb", num=2
)
def test_open_files():
with filetexts(files, mode="b"):
myfiles = open_files("./.test.accounts.*")
assert len(myfiles) == len(files)
for lazy_file, data_file in zip(myfiles, sorted(files)):
with lazy_file as f:
x = f.read()
assert x == files[data_file]
@pytest.mark.parametrize("encoding", ["utf-8", "ascii"])
def test_open_files_text_mode(encoding):
with filetexts(files, mode="b"):
myfiles = open_files("./.test.accounts.*", mode="rt", encoding=encoding)
assert len(myfiles) == len(files)
data = []
for file in myfiles:
with file as f:
data.append(f.read())
assert list(data) == [files[k].decode(encoding) for k in sorted(files)]
@pytest.mark.parametrize("mode", ["rt", "rb"])
@pytest.mark.parametrize("fmt", list(compression.compr))
def test_compressions(fmt, mode, tmpdir):
if fmt == "zip" and sys.version_info < (3, 6):
pytest.xfail("zip compression requires python3.6 or higher")
tmpdir = str(tmpdir)
fn = os.path.join(tmpdir, ".tmp.getsize")
fs = LocalFileSystem()
f = OpenFile(fs, fn, compression=fmt, mode="wb")
data = b"Long line of readily compressible text"
with f as fo:
fo.write(data)
if fmt is None:
assert fs.size(fn) == len(data)
else:
assert fs.size(fn) != len(data)
f = OpenFile(fs, fn, compression=fmt, mode=mode)
with f as fo:
if mode == "rb":
assert fo.read() == data
else:
assert fo.read() == data.decode()
def test_bad_compression():
with filetexts(files, mode="b"):
for func in [open_files]:
with pytest.raises(ValueError):
func("./.test.accounts.*", compression="not-found")
def test_not_found():
fn = "not-a-file"
fs = LocalFileSystem()
with pytest.raises((FileNotFoundError, OSError)):
with OpenFile(fs, fn, mode="rb"):
pass
def test_isfile():
fs = LocalFileSystem()
with filetexts(files, mode="b"):
for f in files.keys():
assert fs.isfile(f)
assert fs.isfile("file://" + f)
assert not fs.isfile("not-a-file")
assert not fs.isfile("file://not-a-file")
def test_isdir():
fs = LocalFileSystem()
with filetexts(files, mode="b"):
for f in files.keys():
assert fs.isdir(os.path.dirname(os.path.abspath(f)))
assert not fs.isdir(f)
assert not fs.isdir("not-a-dir")
@pytest.mark.parametrize("compression_opener", [(None, open), ("gzip", gzip.open)])
def test_open_files_write(tmpdir, compression_opener):
tmpdir = str(tmpdir)
compression, opener = compression_opener
fn = str(tmpdir) + "/*.part"
files = open_files(fn, num=2, mode="wb", compression=compression)
assert len(files) == 2
assert {f.mode for f in files} == {"wb"}
for fil in files:
with fil as f:
f.write(b"000")
files = sorted(os.listdir(tmpdir))
assert files == ["0.part", "1.part"]
with opener(os.path.join(tmpdir, files[0]), "rb") as f:
d = f.read()
assert d == b"000"
def test_pickability_of_lazy_files(tmpdir):
tmpdir = str(tmpdir)
cloudpickle = pytest.importorskip("cloudpickle")
with filetexts(files, mode="b"):
myfiles = open_files("./.test.accounts.*")
myfiles2 = cloudpickle.loads(cloudpickle.dumps(myfiles))
for f, f2 in zip(myfiles, myfiles2):
assert f.path == f2.path
assert isinstance(f.fs, type(f2.fs))
with f as f_open, f2 as f2_open:
assert f_open.read() == f2_open.read()
def test_abs_paths(tmpdir):
tmpdir = str(tmpdir)
here = os.getcwd()
os.chdir(tmpdir)
with open("tmp", "w") as f:
f.write("hi")
out = LocalFileSystem().glob("./*")
assert len(out) == 1
assert "/" in out[0]
assert "tmp" in out[0]
# I don't know what this was testing - but should avoid local paths anyway
# fs = LocalFileSystem()
os.chdir(here)
# with fs.open('tmp', 'r') as f:
# res = f.read()
# assert res == 'hi'
@pytest.mark.parametrize("sep", ["/", "\\"])
@pytest.mark.parametrize("chars", ["+", "++", "(", ")", "|", "\\"])
def test_glob_weird_characters(tmpdir, sep, chars):
tmpdir = make_path_posix(str(tmpdir))
subdir = tmpdir + sep + "test" + chars + "x"
try:
os.makedirs(subdir, exist_ok=True)
except OSError as e:
if WIN and "label syntax" in str(e):
pytest.xfail("Illegal windows directory name")
else:
raise
with open(subdir + sep + "tmp", "w") as f:
f.write("hi")
out = LocalFileSystem().glob(subdir + sep + "*")
assert len(out) == 1
assert "/" in out[0]
assert "tmp" in out[0]
def test_globfind_dirs(tmpdir):
tmpdir = make_path_posix(str(tmpdir))
fs = fsspec.filesystem("file")
fs.mkdir(tmpdir + "/dir")
fs.touch(tmpdir + "/dir/afile")
assert [tmpdir + "/dir"] == fs.glob(tmpdir + "/*")
assert fs.glob(tmpdir + "/*", detail=True)[tmpdir + "/dir"]["type"] == "directory"
assert (
fs.glob(tmpdir + "/dir/*", detail=True)[tmpdir + "/dir/afile"]["type"] == "file"
)
assert [tmpdir + "/dir/afile"] == fs.find(tmpdir)
assert [tmpdir + "/dir", tmpdir + "/dir/afile"] == fs.find(tmpdir, withdirs=True)
def test_touch(tmpdir):
import time
fn = str(tmpdir + "/in/file")
fs = fsspec.filesystem("file", auto_mkdir=False)
with pytest.raises(OSError):
fs.touch(fn)
fs = fsspec.filesystem("file", auto_mkdir=True)
fs.touch(fn)
info = fs.info(fn)
time.sleep(0.2)
fs.touch(fn)
info2 = fs.info(fn)
if not WIN:
assert info2["mtime"] > info["mtime"]
def test_get_pyarrow_filesystem():
pa = pytest.importorskip("pyarrow")
fs = LocalFileSystem()
if LooseVersion(pa.__version__) < LooseVersion("2.0"):
assert isinstance(fs, pa.filesystem.FileSystem)
assert fs._get_pyarrow_filesystem() is fs
else:
assert not isinstance(fs, pa.filesystem.FileSystem)
class UnknownFileSystem(object):
pass
assert not isinstance(UnknownFileSystem(), pa.filesystem.FileSystem)
def test_directories(tmpdir):
tmpdir = make_path_posix(str(tmpdir))
fs = LocalFileSystem()
fs.mkdir(tmpdir + "/dir")
assert tmpdir + "/dir" in fs.ls(tmpdir)
assert fs.ls(tmpdir, True)[0]["type"] == "directory"
fs.rmdir(tmpdir + "/dir")
assert not fs.ls(tmpdir)
def test_file_ops(tmpdir):
tmpdir = make_path_posix(str(tmpdir))
fs = LocalFileSystem(auto_mkdir=True)
with pytest.raises(FileNotFoundError):
fs.info(tmpdir + "/nofile")
fs.touch(tmpdir + "/afile")
i1 = fs.ukey(tmpdir + "/afile")
assert tmpdir + "/afile" in fs.ls(tmpdir)
with fs.open(tmpdir + "/afile", "wb") as f:
f.write(b"data")
i2 = fs.ukey(tmpdir + "/afile")
assert i1 != i2 # because file changed
fs.copy(tmpdir + "/afile", tmpdir + "/afile2")
assert tmpdir + "/afile2" in fs.ls(tmpdir)
fs.move(tmpdir + "/afile", tmpdir + "/afile3")
assert not fs.exists(tmpdir + "/afile")
fs.cp(tmpdir + "/afile3", tmpdir + "/deeply/nested/file")
assert fs.exists(tmpdir + "/deeply/nested/file")
fs.rm(tmpdir + "/afile3", recursive=True)
assert not fs.exists(tmpdir + "/afile3")
files = [tmpdir + "/afile4", tmpdir + "/afile5"]
[fs.touch(f) for f in files]
fs.rm(files)
assert all(not fs.exists(f) for f in files)
fs.rm(tmpdir, recursive=True)
assert not fs.exists(tmpdir)
def test_recursive_get_put(tmpdir):
tmpdir = make_path_posix(str(tmpdir))
fs = LocalFileSystem(auto_mkdir=True)
fs.mkdir(tmpdir + "/a1/a2/a3")
fs.touch(tmpdir + "/a1/a2/a3/afile")
fs.touch(tmpdir + "/a1/afile")
fs.get("file://{0}/a1".format(tmpdir), tmpdir + "/b1", recursive=True)
assert fs.isfile(tmpdir + "/b1/afile")
assert fs.isfile(tmpdir + "/b1/a2/a3/afile")
fs.put(tmpdir + "/b1", "file://{0}/c1".format(tmpdir), recursive=True)
assert fs.isfile(tmpdir + "/c1/afile")
assert fs.isfile(tmpdir + "/c1/a2/a3/afile")
def test_commit_discard(tmpdir):
tmpdir = str(tmpdir)
fs = LocalFileSystem()
with fs.transaction:
with fs.open(tmpdir + "/afile", "wb") as f:
assert not fs.exists(tmpdir + "/afile")
f.write(b"data")
assert not fs.exists(tmpdir + "/afile")
assert fs._transaction is None
assert fs.cat(tmpdir + "/afile") == b"data"
try:
with fs.transaction:
with fs.open(tmpdir + "/bfile", "wb") as f:
f.write(b"data")
raise KeyboardInterrupt
except KeyboardInterrupt:
assert not fs.exists(tmpdir + "/bfile")
def test_make_path_posix():
cwd = os.getcwd()
if WIN:
drive = cwd[0]
assert make_path_posix("/a/posix/path") == f"{drive}:/a/posix/path"
assert make_path_posix("/posix") == f"{drive}:/posix"
else:
assert make_path_posix("/a/posix/path") == "/a/posix/path"
assert make_path_posix("/posix") == "/posix"
assert make_path_posix("relpath") == posixpath.join(make_path_posix(cwd), "relpath")
assert make_path_posix("rel/path") == posixpath.join(
make_path_posix(cwd), "rel/path"
)
if WIN:
assert make_path_posix("C:\\path") == "C:/path"
if WIN:
assert (
make_path_posix(
"\\\\windows-server\\someshare\\path\\more\\path\\dir\\foo.parquet"
)
== "//windows-server/someshare/path/more/path/dir/foo.parquet"
)
assert (
make_path_posix(
r"\\SERVER\UserHomeFolder$\me\My Documents\project1\data\filen.csv"
)
== "//SERVER/UserHomeFolder$/me/My Documents/project1/data/filen.csv"
)
assert "/" in make_path_posix("rel\\path")
def test_linked_files(tmpdir):
tmpdir = str(tmpdir)
fn0 = os.path.join(tmpdir, "target")
fn1 = os.path.join(tmpdir, "link1")
fn2 = os.path.join(tmpdir, "link2")
data = b"my target data"
with open(fn0, "wb") as f:
f.write(data)
try:
os.symlink(fn0, fn1)
os.symlink(fn0, fn2)
except OSError:
if WIN:
pytest.xfail("Ran on win without admin permissions")
else:
raise
fs = LocalFileSystem()
assert fs.info(fn0)["type"] == "file"
assert fs.info(fn1)["type"] == "file"
assert fs.info(fn2)["type"] == "file"
assert not fs.info(fn0)["islink"]
assert fs.info(fn1)["islink"]
assert fs.info(fn2)["islink"]
assert fs.info(fn0)["size"] == len(data)
assert fs.info(fn1)["size"] == len(data)
assert fs.info(fn2)["size"] == len(data)
of = fsspec.open(fn1, "rb")
with of as f:
assert f.read() == data
of = fsspec.open(fn2, "rb")
with of as f:
assert f.read() == data
def test_linked_directories(tmpdir):
tmpdir = str(tmpdir)
subdir0 = os.path.join(tmpdir, "target")
subdir1 = os.path.join(tmpdir, "link1")
subdir2 = os.path.join(tmpdir, "link2")
os.makedirs(subdir0)
try:
os.symlink(subdir0, subdir1)
os.symlink(subdir0, subdir2)
except OSError:
if WIN:
pytest.xfail("Ran on win without admin permissions")
else:
raise
fs = LocalFileSystem()
assert fs.info(subdir0)["type"] == "directory"
assert fs.info(subdir1)["type"] == "directory"
assert fs.info(subdir2)["type"] == "directory"
assert not fs.info(subdir0)["islink"]
assert fs.info(subdir1)["islink"]
assert fs.info(subdir2)["islink"]
def test_isfilestore():
fs = LocalFileSystem(auto_mkdir=False)
assert fs._isfilestore()
def test_pickle(tmpdir):
fs = LocalFileSystem()
tmpdir = str(tmpdir)
fn0 = os.path.join(tmpdir, "target")
with open(fn0, "wb") as f:
f.write(b"data")
f = fs.open(fn0, "rb")
f.seek(1)
f2 = pickle.loads(pickle.dumps(f))
assert f2.read() == f.read()
f = fs.open(fn0, "wb")
with pytest.raises(ValueError):
pickle.dumps(f)
def test_strip_protocol_expanduser():
path = "file://~\\foo\\bar" if WIN else "file://~/foo/bar"
stripped = LocalFileSystem._strip_protocol(path)
assert path != stripped
assert "file://" not in stripped
assert stripped.startswith(os.path.expanduser("~").replace("\\", "/"))
assert not LocalFileSystem._strip_protocol("./").endswith("/")
def test_mkdir_twice_fails(tmpdir):
fn = os.path.join(tmpdir, "test")
fs = fsspec.filesystem("file")
fs.mkdir(fn)
with pytest.raises(FileExistsError):
fs.mkdir(fn)
def test_iterable(tmpdir):
data = b"a\nhello\noi"
fn = os.path.join(tmpdir, "test")
with open(fn, "wb") as f:
f.write(data)
of = fsspec.open("file://%s" % fn, "rb")
with of as f:
out = list(f)
assert b"".join(out) == data
def test_mv_empty(tmpdir):
localfs = fsspec.filesystem("file")
src = os.path.join(str(tmpdir), "src")
dest = os.path.join(str(tmpdir), "dest")
assert localfs.isdir(src) is False
localfs.mkdir(src)
assert localfs.isdir(src)
localfs.move(src, dest, recursive=True)
assert localfs.isdir(src) is False
assert localfs.isdir(dest)
assert localfs.info(dest)
def test_mv_recursive(tmpdir):
localfs = fsspec.filesystem("file")
src = os.path.join(str(tmpdir), "src")
dest = os.path.join(str(tmpdir), "dest")
assert localfs.isdir(src) is False
localfs.mkdir(src)
assert localfs.isdir(src)
localfs.touch(os.path.join(src, "afile"))
localfs.move(src, dest, recursive=True)
assert localfs.isdir(src) is False
assert localfs.isdir(dest)
assert localfs.info(os.path.join(dest, "afile"))
@pytest.mark.xfail(WIN, reason="windows expand path to be revisited")
def test_copy_errors(tmpdir):
localfs = fsspec.filesystem("file")
dest1 = os.path.join(str(tmpdir), "dest1")
dest2 = os.path.join(str(tmpdir), "dest2")
src = os.path.join(str(tmpdir), "src")
file1 = os.path.join(src, "afile1")
file2 = os.path.join(src, "afile2")
dne = os.path.join(str(tmpdir), "src", "notafile")
localfs.mkdir(src)
localfs.mkdir(dest1)
localfs.mkdir(dest2)
localfs.touch(file1)
localfs.touch(file2)
# Non recursive should raise an error unless we specify ignore
with pytest.raises(FileNotFoundError):
localfs.copy([file1, file2, dne], dest1)
localfs.copy([file1, file2, dne], dest1, on_error="ignore")
assert sorted(localfs.ls(dest1)) == [
make_path_posix(os.path.join(dest1, "afile1")),
make_path_posix(os.path.join(dest1, "afile2")),
]
# Recursive should raise an error only if we specify raise
# the patch simulates the filesystem finding a file that does not
# exist in the directory
current_files = localfs.expand_path(src, recursive=True)
with patch.object(localfs, "expand_path", return_value=current_files + [dne]):
with pytest.raises(FileNotFoundError):
localfs.copy(src, dest2, recursive=True, on_error="raise")
localfs.copy(src, dest2, recursive=True)
assert sorted(localfs.ls(dest2)) == [
make_path_posix(os.path.join(dest2, "afile1")),
make_path_posix(os.path.join(dest2, "afile2")),
]
def test_transaction(tmpdir):
file = str(tmpdir / "test.txt")
fs = LocalFileSystem()
with fs.transaction:
content = "hello world"
with fs.open(file, "w") as fp:
fp.write(content)
with fs.open(file, "r") as fp:
read_content = fp.read()
assert content == read_content
def test_delete_cwd(tmpdir):
cwd = os.getcwd()
fs = LocalFileSystem()
try:
os.chdir(tmpdir)
with pytest.raises(ValueError):
fs.rm(".", recursive=True)
finally:
os.chdir(cwd)
@pytest.mark.parametrize(
"opener, ext", [(bz2.open, ".bz2"), (gzip.open, ".gz"), (open, "")]
)
def test_infer_compression(tmpdir, opener, ext):
filename = str(tmpdir / f"test{ext}")
content = b"hello world"
with opener(filename, "wb") as fp:
fp.write(content)
fs = LocalFileSystem()
with fs.open(f"file://{filename}", "rb", compression="infer") as fp:
read_content = fp.read()
assert content == read_content
def test_info_path_like(tmpdir):
path = Path(tmpdir / "test_info")
path.write_text("fsspec")
fs = LocalFileSystem()
assert fs.exists(path)
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/implementations/tests/test_local.py",
"copies": "1",
"size": "21647",
"license": "bsd-3-clause",
"hash": -5022377358730488000,
"line_mean": 29.2755244755,
"line_max": 88,
"alpha_frac": 0.58894997,
"autogenerated": false,
"ratio": 3.213628266033254,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9301176560507391,
"avg_score": 0.000280335105172447,
"num_lines": 715
} |
from __future__ import absolute_import, division, print_function
import calendar
import datetime
import json
import platform
import time
import uuid
import warnings
from collections import OrderedDict
import stripe
from stripe import error, oauth_error, http_client, version, util, six
from stripe.multipart_data_generator import MultipartDataGenerator
from stripe.six.moves.urllib.parse import urlencode, urlsplit, urlunsplit
from stripe.stripe_response import StripeResponse, StripeStreamResponse
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _encode_nested_dict(key, data, fmt="%s[%s]"):
d = OrderedDict()
for subkey, subvalue in six.iteritems(data):
d[fmt % (key, subkey)] = subvalue
return d
def _api_encode(data):
for key, value in six.iteritems(data):
key = util.utf8(key)
if value is None:
continue
elif hasattr(value, "stripe_id"):
yield (key, value.stripe_id)
elif isinstance(value, list) or isinstance(value, tuple):
for i, sv in enumerate(value):
if isinstance(sv, dict):
subdict = _encode_nested_dict("%s[%d]" % (key, i), sv)
for k, v in _api_encode(subdict):
yield (k, v)
else:
yield ("%s[%d]" % (key, i), util.utf8(sv))
elif isinstance(value, dict):
subdict = _encode_nested_dict(key, value)
for subkey, subvalue in _api_encode(subdict):
yield (subkey, subvalue)
elif isinstance(value, datetime.datetime):
yield (key, _encode_datetime(value))
else:
yield (key, util.utf8(value))
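# Illustrative sketch (hypothetical payload): _api_encode flattens nested
# containers into Stripe's form-encoding style, so
#
#   list(_api_encode({"card": {"number": "4242"}, "tags": ["a", "b"]}))
#
# yields pairs like ("card[number]", "4242"), ("tags[0]", "a"), ("tags[1]", "b").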
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
class APIRequestor(object):
def __init__(
self,
key=None,
client=None,
api_base=None,
api_version=None,
account=None,
):
self.api_base = api_base or stripe.api_base
self.api_key = key
self.api_version = api_version or stripe.api_version
self.stripe_account = account
self._default_proxy = None
from stripe import verify_ssl_certs as verify
from stripe import proxy
if client:
self._client = client
elif stripe.default_http_client:
self._client = stripe.default_http_client
if proxy != self._default_proxy:
warnings.warn(
"stripe.proxy was updated after sending a "
"request - this is a no-op. To use a different proxy, "
"set stripe.default_http_client to a new client "
"configured with the proxy."
)
else:
# If the stripe.default_http_client has not been set by the user
# yet, we'll set it here. This way, we aren't creating a new
# HttpClient for every request.
stripe.default_http_client = http_client.new_default_http_client(
verify_ssl_certs=verify, proxy=proxy
)
self._client = stripe.default_http_client
self._default_proxy = proxy
@classmethod
def format_app_info(cls, info):
str = info["name"]
if info["version"]:
str += "/%s" % (info["version"],)
if info["url"]:
str += " (%s)" % (info["url"],)
return str
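    # Illustrative example (not part of the stripe library): for a hypothetical
    # info = {"name": "MyApp", "version": "1.2.3", "url": "https://example.com"},
    # format_app_info returns "MyApp/1.2.3 (https://example.com)".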
def request(self, method, url, params=None, headers=None):
rbody, rcode, rheaders, my_api_key = self.request_raw(
method.lower(), url, params, headers, is_streaming=False
)
resp = self.interpret_response(rbody, rcode, rheaders)
return resp, my_api_key
def request_stream(self, method, url, params=None, headers=None):
stream, rcode, rheaders, my_api_key = self.request_raw(
method.lower(), url, params, headers, is_streaming=True
)
resp = self.interpret_streaming_response(stream, rcode, rheaders)
return resp, my_api_key
def handle_error_response(self, rbody, rcode, resp, rheaders):
try:
error_data = resp["error"]
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody,
rcode,
resp,
)
err = None
# OAuth errors are a JSON object where `error` is a string. In
# contrast, in API errors, `error` is a hash with sub-keys. We use
# this property to distinguish between OAuth and API errors.
if isinstance(error_data, six.string_types):
err = self.specific_oauth_error(
rbody, rcode, resp, rheaders, error_data
)
if err is None:
err = self.specific_api_error(
rbody, rcode, resp, rheaders, error_data
)
raise err
def specific_api_error(self, rbody, rcode, resp, rheaders, error_data):
util.log_info(
"Stripe API error received",
error_code=error_data.get("code"),
error_type=error_data.get("type"),
error_message=error_data.get("message"),
error_param=error_data.get("param"),
)
# Rate limits were previously coded as 400's with code 'rate_limit'
if rcode == 429 or (
rcode == 400 and error_data.get("code") == "rate_limit"
):
return error.RateLimitError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode in [400, 404]:
if error_data.get("type") == "idempotency_error":
return error.IdempotencyError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
else:
return error.InvalidRequestError(
error_data.get("message"),
error_data.get("param"),
error_data.get("code"),
rbody,
rcode,
resp,
rheaders,
)
elif rcode == 401:
return error.AuthenticationError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 402:
return error.CardError(
error_data.get("message"),
error_data.get("param"),
error_data.get("code"),
rbody,
rcode,
resp,
rheaders,
)
elif rcode == 403:
return error.PermissionError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
else:
return error.APIError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
def specific_oauth_error(self, rbody, rcode, resp, rheaders, error_code):
description = resp.get("error_description", error_code)
util.log_info(
"Stripe OAuth error received",
error_code=error_code,
error_description=description,
)
args = [error_code, description, rbody, rcode, resp, rheaders]
if error_code == "invalid_client":
return oauth_error.InvalidClientError(*args)
elif error_code == "invalid_grant":
return oauth_error.InvalidGrantError(*args)
elif error_code == "invalid_request":
return oauth_error.InvalidRequestError(*args)
elif error_code == "invalid_scope":
return oauth_error.InvalidScopeError(*args)
elif error_code == "unsupported_grant_type":
return oauth_error.UnsupportedGrantTypError(*args)
elif error_code == "unsupported_response_type":
return oauth_error.UnsupportedResponseTypError(*args)
return None
def request_headers(self, api_key, method):
user_agent = "Stripe/v1 PythonBindings/%s" % (version.VERSION,)
if stripe.app_info:
user_agent += " " + self.format_app_info(stripe.app_info)
ua = {
"bindings_version": version.VERSION,
"lang": "python",
"publisher": "stripe",
"httplib": self._client.name,
}
for attr, func in [
["lang_version", platform.python_version],
["platform", platform.platform],
["uname", lambda: " ".join(platform.uname())],
]:
try:
val = func()
except Exception as e:
val = "!! %s" % (e,)
ua[attr] = val
if stripe.app_info:
ua["application"] = stripe.app_info
headers = {
"X-Stripe-Client-User-Agent": json.dumps(ua),
"User-Agent": user_agent,
"Authorization": "Bearer %s" % (api_key,),
}
if self.stripe_account:
headers["Stripe-Account"] = self.stripe_account
if method == "post":
headers["Content-Type"] = "application/x-www-form-urlencoded"
headers.setdefault("Idempotency-Key", str(uuid.uuid4()))
if self.api_version is not None:
headers["Stripe-Version"] = self.api_version
return headers
def request_raw(
self,
method,
url,
params=None,
supplied_headers=None,
is_streaming=False,
):
"""
Mechanism for issuing an API call
"""
if self.api_key:
my_api_key = self.api_key
else:
from stripe import api_key
my_api_key = api_key
if my_api_key is None:
raise error.AuthenticationError(
"No API key provided. (HINT: set your API key using "
'"stripe.api_key = <API-KEY>"). You can generate API keys '
"from the Stripe web interface. See https://stripe.com/api "
"for details, or email [email protected] if you have any "
"questions."
)
abs_url = "%s%s" % (self.api_base, url)
encoded_params = urlencode(list(_api_encode(params or {})))
# Don't use strict form encoding by changing the square bracket control
# characters back to their literals. This is fine by the server, and
# makes these parameter strings easier to read.
encoded_params = encoded_params.replace("%5B", "[").replace("%5D", "]")
if method == "get" or method == "delete":
if params:
abs_url = _build_api_url(abs_url, encoded_params)
post_data = None
elif method == "post":
if (
supplied_headers is not None
and supplied_headers.get("Content-Type")
== "multipart/form-data"
):
generator = MultipartDataGenerator()
generator.add_params(params or {})
post_data = generator.get_post_data()
supplied_headers[
"Content-Type"
] = "multipart/form-data; boundary=%s" % (generator.boundary,)
else:
post_data = encoded_params
else:
raise error.APIConnectionError(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"Stripe bindings. Please contact [email protected] for "
"assistance." % (method,)
)
headers = self.request_headers(my_api_key, method)
if supplied_headers is not None:
for key, value in six.iteritems(supplied_headers):
headers[key] = value
util.log_info("Request to Stripe api", method=method, path=abs_url)
util.log_debug(
"Post details",
post_data=encoded_params,
api_version=self.api_version,
)
if is_streaming:
(
rcontent,
rcode,
rheaders,
) = self._client.request_stream_with_retries(
method, abs_url, headers, post_data
)
else:
rcontent, rcode, rheaders = self._client.request_with_retries(
method, abs_url, headers, post_data
)
util.log_info("Stripe API response", path=abs_url, response_code=rcode)
util.log_debug("API response body", body=rcontent)
if "Request-Id" in rheaders:
request_id = rheaders["Request-Id"]
util.log_debug(
"Dashboard link for request",
link=util.dashboard_link(request_id),
)
return rcontent, rcode, rheaders, my_api_key
def _should_handle_code_as_error(self, rcode):
return not 200 <= rcode < 300
def interpret_response(self, rbody, rcode, rheaders):
try:
if hasattr(rbody, "decode"):
rbody = rbody.decode("utf-8")
resp = StripeResponse(rbody, rcode, rheaders)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody,
rcode,
rheaders,
)
if self._should_handle_code_as_error(rcode):
self.handle_error_response(rbody, rcode, resp.data, rheaders)
return resp
def interpret_streaming_response(self, stream, rcode, rheaders):
        # Streaming responses are handled with minimal processing for the
        # success case (i.e. we don't want to read the content). When an error
        # is received, we need to read from the stream and parse the received
        # JSON, treating it like a standard JSON response.
if self._should_handle_code_as_error(rcode):
if hasattr(stream, "getvalue"):
json_content = stream.getvalue()
elif hasattr(stream, "read"):
json_content = stream.read()
else:
                raise NotImplementedError(
                    "HTTP client %s does not return an IOBase object which "
                    "can be consumed when streaming a response."
                    % (self._client.name,)
                )
return self.interpret_response(json_content, rcode, rheaders)
else:
return StripeStreamResponse(stream, rcode, rheaders)
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/api_requestor.py",
"copies": "1",
"size": "14921",
"license": "mit",
"hash": -3244826328741139500,
"line_mean": 34.5261904762,
"line_max": 80,
"alpha_frac": 0.5456068628,
"autogenerated": false,
"ratio": 4.185413744740533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005878894767783656,
"num_lines": 420
} |
from __future__ import (absolute_import, division, print_function)
import cgi
from owslib.etree import etree
from datetime import datetime
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from owslib import ows
from owslib.crs import Crs
from owslib.fes import FilterCapabilities
from owslib.util import openURL, testXMLValue, nspath_eval, nspath, extract_time
from owslib.namespaces import Namespaces
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["ogc","sa","sml","gml","sos","swe","xlink"])
ns["ows"] = n.get_namespace("ows110")
return ns
namespaces = get_namespaces()
class SensorObservationService_1_0_0(object):
"""
Abstraction for OGC Sensor Observation Service (SOS).
Implements ISensorObservationService.
"""
def __new__(self,url, version, xml=None, username=None, password=None):
"""overridden __new__ method"""
obj=object.__new__(self)
obj.__init__(url, version, xml, username, password)
return obj
def __getitem__(self,id):
''' check contents dictionary to allow dict like access to service observational offerings'''
if id in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[id]
else:
raise KeyError("No Observational Offering with id: %s" % id)
def __init__(self, url, version='1.0.0', xml=None, username=None, password=None):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self._capabilities = None
# Authentication handled by Reader
reader = SosCapabilitiesReader(
version=self.version, url=self.url, username=self.username, password=self.password
)
if xml: # read from stored xml
self._capabilities = reader.read_string(xml)
else: # read from server
self._capabilities = reader.read(self.url)
# Avoid building metadata if the response is an Exception
if self._capabilities.tag == nspath_eval("ows:ExceptionReport", namespaces):
raise ows.ExceptionReport(self._capabilities)
# build metadata objects
self._build_metadata()
    def getOperationByName(self, name):
        """Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
def _build_metadata(self):
"""
Set up capabilities metadata objects
"""
# ows:ServiceIdentification metadata
service_id_element = self._capabilities.find(nspath_eval('ows:ServiceIdentification', namespaces))
self.identification = ows.ServiceIdentification(service_id_element)
# ows:ServiceProvider metadata
service_provider_element = self._capabilities.find(nspath_eval('ows:ServiceProvider', namespaces))
self.provider = ows.ServiceProvider(service_provider_element)
# ows:OperationsMetadata metadata
self.operations=[]
for elem in self._capabilities.findall(nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
self.operations.append(ows.OperationsMetadata(elem))
# sos:FilterCapabilities
filters = self._capabilities.find(nspath_eval('sos:Filter_Capabilities', namespaces))
if filters is not None:
self.filters = FilterCapabilities(filters)
else:
self.filters = None
# sos:Contents metadata
self.contents = {}
self.offerings = []
for offering in self._capabilities.findall(nspath_eval('sos:Contents/sos:ObservationOfferingList/sos:ObservationOffering', namespaces)):
off = SosObservationOffering(offering)
self.contents[off.id] = off
self.offerings.append(off)
def describe_sensor(self, outputFormat=None,
procedure=None,
method='Get',
**kwargs):
try:
base_url = next((m.get('url') for m in self.getOperationByName('DescribeSensor').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'service': 'SOS', 'version': self.version, 'request': 'DescribeSensor'}
# Required Fields
assert isinstance(outputFormat, str)
request['outputFormat'] = outputFormat
assert isinstance(procedure, str)
request['procedure'] = procedure
url_kwargs = {}
if 'timeout' in kwargs:
url_kwargs['timeout'] = kwargs.pop('timeout') # Client specified timeout value
# Optional Fields
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
response = openURL(base_url, data, method, username=self.username, password=self.password, **url_kwargs).read()
tr = etree.fromstring(response)
if tr.tag == nspath_eval("ows:ExceptionReport", namespaces):
raise ows.ExceptionReport(tr)
return response
def get_observation(self, responseFormat=None,
offerings=None,
observedProperties=None,
eventTime=None,
procedure=None,
method='Get',
**kwargs):
"""
Parameters
----------
        responseFormat : string
            Output format. Provide one that is available for all offerings
        method : string
            Optional. HTTP DCP method name; must be Get or Post (default: Get).
**kwargs : extra arguments
anything else e.g. vendor specific parameters
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetObservation').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'service': 'SOS', 'version': self.version, 'request': 'GetObservation'}
# Required Fields
assert isinstance(offerings, list) and len(offerings) > 0
request['offering'] = ','.join(offerings)
assert isinstance(observedProperties, list) and len(observedProperties) > 0
request['observedProperty'] = ','.join(observedProperties)
assert isinstance(responseFormat, str)
request['responseFormat'] = responseFormat
# Optional Fields
if eventTime is not None:
request['eventTime'] = eventTime
url_kwargs = {}
if 'timeout' in kwargs:
url_kwargs['timeout'] = kwargs.pop('timeout') # Client specified timeout value
if procedure is not None:
request['procedure'] = procedure
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
response = openURL(base_url, data, method, username=self.username,
password=self.password, **url_kwargs).read()
try:
tr = etree.fromstring(response)
if tr.tag == nspath_eval("ows:ExceptionReport", namespaces):
raise ows.ExceptionReport(tr)
else:
return response
except ows.ExceptionReport:
raise
except BaseException:
return response
def get_operation_by_name(self, name):
"""
        Return an Operation item by name, case-insensitive
"""
for item in self.operations:
if item.name.lower() == name.lower():
return item
raise KeyError("No Operation named %s" % name)
class SosObservationOffering(object):
def __init__(self, element):
self._root = element
self.id = testXMLValue(self._root.attrib.get(nspath_eval('gml:id', namespaces)), True)
self.description = testXMLValue(self._root.find(nspath_eval('gml:description', namespaces)))
self.name = testXMLValue(self._root.find(nspath_eval('gml:name', namespaces)))
val = testXMLValue(self._root.find(nspath_eval('gml:srsName', namespaces)))
if val is not None:
self.srs = Crs(val)
# LOOK: Check on GML boundedBy to make sure we handle all of the cases
# gml:boundedBy
try:
envelope = self._root.find(nspath_eval('gml:boundedBy/gml:Envelope', namespaces))
lower_left_corner = testXMLValue(envelope.find(nspath_eval('gml:lowerCorner', namespaces))).split()
upper_right_corner = testXMLValue(envelope.find(nspath_eval('gml:upperCorner', namespaces))).split()
# (left, bottom, right, top) in self.bbox_srs units
self.bbox = (float(lower_left_corner[1]), float(lower_left_corner[0]), float(upper_right_corner[1]), float(upper_right_corner[0]))
self.bbox_srs = Crs(testXMLValue(envelope.attrib.get('srsName'), True))
except Exception:
self.bbox = None
self.bbox_srs = None
# LOOK: Support all gml:TimeGeometricPrimitivePropertyType
# Right now we are just supporting gml:TimePeriod
# sos:Time
begin_position_element = self._root.find(nspath_eval('sos:time/gml:TimePeriod/gml:beginPosition', namespaces))
self.begin_position = extract_time(begin_position_element)
end_position_element = self._root.find(nspath_eval('sos:time/gml:TimePeriod/gml:endPosition', namespaces))
self.end_position = extract_time(end_position_element)
self.result_model = testXMLValue(self._root.find(nspath_eval('sos:resultModel', namespaces)))
self.procedures = []
for proc in self._root.findall(nspath_eval('sos:procedure', namespaces)):
self.procedures.append(testXMLValue(proc.attrib.get(nspath_eval('xlink:href', namespaces)), True))
# LOOK: Support swe:Phenomenon here
# this includes compound properties
self.observed_properties = []
for op in self._root.findall(nspath_eval('sos:observedProperty', namespaces)):
self.observed_properties.append(testXMLValue(op.attrib.get(nspath_eval('xlink:href', namespaces)), True))
self.features_of_interest = []
for fot in self._root.findall(nspath_eval('sos:featureOfInterest', namespaces)):
self.features_of_interest.append(testXMLValue(fot.attrib.get(nspath_eval('xlink:href', namespaces)), True))
self.response_formats = []
for rf in self._root.findall(nspath_eval('sos:responseFormat', namespaces)):
self.response_formats.append(testXMLValue(rf))
self.response_modes = []
for rm in self._root.findall(nspath_eval('sos:responseMode', namespaces)):
self.response_modes.append(testXMLValue(rm))
def __str__(self):
return 'Offering id: %s, name: %s' % (self.id, self.name)
def __repr__(self):
return "<SosObservationOffering '%s'>" % self.name
class SosCapabilitiesReader(object):
def __init__(self, version="1.0.0", url=None, username=None, password=None):
self.version = version
self.url = url
self.username = username
self.password = password
def capabilities_url(self, service_url):
"""
Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'SOS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'acceptVersions' not in params:
qs.append(('acceptVersions', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
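    # Illustrative example (not part of OWSLib); the endpoint URL below is
    # hypothetical:
    #     SosCapabilitiesReader(version='1.0.0').capabilities_url('http://example.com/sos')
    # returns
    #     'http://example.com/sos?service=SOS&request=GetCapabilities&acceptVersions=1.0.0'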
def read(self, service_url):
"""
        Get and parse a SOS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
acceptVersions, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username=self.username, password=self.password)
return etree.fromstring(u.read())
def read_string(self, st):
"""
Parse a SOS capabilities document, returning an elementtree instance
st should be an XML capabilities document
"""
if not isinstance(st, bytes):
raise ValueError("String must be of type bytes, not %s" % type(st))
return etree.fromstring(st)
| {
"repo_name": "b-cube/OWSLib",
"path": "owslib/swe/observation/sos100.py",
"copies": "18",
"size": "12996",
"license": "bsd-3-clause",
"hash": -6258259001158457000,
"line_mean": 38.2628398792,
"line_max": 145,
"alpha_frac": 0.6094952293,
"autogenerated": false,
"ratio": 4.241514360313316,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003401592778769913,
"num_lines": 331
} |
from __future__ import absolute_import, division, print_function
import cgi
import json
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import Protocol
from twisted.web.client import ResponseDone
from twisted.web.http import PotentialDataLoss
def _encoding_from_headers(headers):
content_types = headers.getRawHeaders(u'content-type')
if content_types is None:
return None
# This seems to be the choice browsers make when encountering multiple
# content-type headers.
content_type, params = cgi.parse_header(content_types[-1])
if 'charset' in params:
return params.get('charset').strip("'\"")
if content_type == 'application/json':
return 'UTF-8'
class _BodyCollector(Protocol):
def __init__(self, finished, collector):
self.finished = finished
self.collector = collector
def dataReceived(self, data):
self.collector(data)
def connectionLost(self, reason):
if reason.check(ResponseDone):
self.finished.callback(None)
elif reason.check(PotentialDataLoss):
# http://twistedmatrix.com/trac/ticket/4840
self.finished.callback(None)
else:
self.finished.errback(reason)
def collect(response, collector):
"""
Incrementally collect the body of the response.
This function may only be called **once** for a given response.
:param IResponse response: The HTTP response to collect the body from.
:param collector: A callable to be called each time data is available
from the response body.
:type collector: single argument callable
:rtype: Deferred that fires with None when the entire body has been read.
"""
if response.length == 0:
return succeed(None)
d = Deferred()
response.deliverBody(_BodyCollector(d, collector))
return d
def content(response):
"""
Read the contents of an HTTP response.
This function may be called multiple times for a response, it uses a
``WeakKeyDictionary`` to cache the contents of the response.
:param IResponse response: The HTTP Response to get the contents of.
:rtype: Deferred that fires with the content as a str.
"""
_content = []
d = collect(response, _content.append)
d.addCallback(lambda _: b''.join(_content))
return d
def json_content(response, **kwargs):
"""
Read the contents of an HTTP response and attempt to decode it as JSON.
This function relies on :py:func:`content` and so may be called more than
once for a given response.
:param IResponse response: The HTTP Response to get the contents of.
:param kwargs: Any keyword arguments accepted by :py:func:`json.loads`
:rtype: Deferred that fires with the decoded JSON.
"""
# RFC7159 (8.1): Default JSON character encoding is UTF-8
d = text_content(response, encoding='utf-8')
d.addCallback(lambda text: json.loads(text, **kwargs))
return d
def text_content(response, encoding='ISO-8859-1'):
"""
Read the contents of an HTTP response and decode it with an appropriate
charset, which may be guessed from the ``Content-Type`` header.
:param IResponse response: The HTTP Response to get the contents of.
:param str encoding: A charset, such as ``UTF-8`` or ``ISO-8859-1``,
used if the response does not specify an encoding.
:rtype: Deferred that fires with a unicode string.
"""
def _decode_content(c):
e = _encoding_from_headers(response.headers)
if e is not None:
return c.decode(e)
return c.decode(encoding)
d = content(response)
d.addCallback(_decode_content)
return d
| {
"repo_name": "pexip/os-python-treq",
"path": "src/treq/content.py",
"copies": "2",
"size": "3734",
"license": "mit",
"hash": 2018549367453233700,
"line_mean": 28.4015748031,
"line_max": 77,
"alpha_frac": 0.678361007,
"autogenerated": false,
"ratio": 4.186098654708521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5864459661708521,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import cgi
import json
from twisted.python.compat import _PY3
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import Protocol
from twisted.web.client import ResponseDone
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
def _encoding_from_headers(headers):
content_types = headers.getRawHeaders('content-type')
if content_types is None:
return None
# This seems to be the choice browsers make when encountering multiple
# content-type headers.
content_type, params = cgi.parse_header(content_types[-1])
if 'charset' in params:
return params.get('charset').strip("'\"")
class _BodyCollector(Protocol):
def __init__(self, finished, collector):
self.finished = finished
self.collector = collector
def dataReceived(self, data):
self.collector(data)
def connectionLost(self, reason):
if reason.check(ResponseDone):
self.finished.callback(None)
elif reason.check(PotentialDataLoss):
# http://twistedmatrix.com/trac/ticket/4840
self.finished.callback(None)
else:
self.finished.errback(reason)
def collect(response, collector):
"""
Incrementally collect the body of the response.
This function may only be called **once** for a given response.
:param IResponse response: The HTTP response to collect the body from.
:param collector: A callable to be called each time data is available
from the response body.
:type collector: single argument callable
:rtype: Deferred that fires with None when the entire body has been read.
"""
if response.length == 0:
return succeed(None)
d = Deferred()
response.deliverBody(_BodyCollector(d, collector))
return d
def content(response):
"""
Read the contents of an HTTP response.
This function may be called multiple times for a response, it uses a
``WeakKeyDictionary`` to cache the contents of the response.
:param IResponse response: The HTTP Response to get the contents of.
:rtype: Deferred that fires with the content as a str.
"""
_content = []
d = collect(response, _content.append)
d.addCallback(lambda _: b''.join(_content))
return d
def json_content(response):
"""
Read the contents of an HTTP response and attempt to decode it as JSON.
This function relies on :py:func:`content` and so may be called more than
once for a given response.
:param IResponse response: The HTTP Response to get the contents of.
:rtype: Deferred that fires with the decoded JSON.
"""
if _PY3:
# RFC7159 (8.1): Default JSON character encoding is UTF-8
d = text_content(response, encoding='utf-8')
else:
d = content(response)
d.addCallback(json.loads)
return d
def text_content(response, encoding='ISO-8859-1'):
"""
Read the contents of an HTTP response and decode it with an appropriate
charset, which may be guessed from the ``Content-Type`` header.
:param IResponse response: The HTTP Response to get the contents of.
    :param str encoding: A valid charset, such as ``UTF-8`` or ``ISO-8859-1``.
    :rtype: Deferred that fires with a unicode string.
"""
def _decode_content(c):
if _PY3:
headers = Headers({
key.decode('ascii'): [y.decode('ascii') for y in val]
for key, val in response.headers.getAllRawHeaders()})
else:
headers = response.headers
e = _encoding_from_headers(headers)
if e is not None:
return c.decode(e)
return c.decode(encoding)
d = content(response)
d.addCallback(_decode_content)
return d
| {
"repo_name": "mithrandi/treq",
"path": "src/treq/content.py",
"copies": "1",
"size": "3871",
"license": "mit",
"hash": 1902973390431397000,
"line_mean": 28.1052631579,
"line_max": 79,
"alpha_frac": 0.6667527771,
"autogenerated": false,
"ratio": 4.207608695652174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5374361472752174,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import cgi
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from owslib.etree import etree
from owslib.util import openURL, strip_bom
class WMSCapabilitiesReader(object):
    """Read and parse a capabilities document into an lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None, headers=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
self.headers = headers
self.request = None
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, service_url, timeout=30):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
self.request = self.capabilities_url(service_url)
# now split it up again to use the generic openURL function...
spliturl = self.request.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get',
username=self.username,
password=self.password,
timeout=timeout,
headers=self.headers)
raw_text = strip_bom(u.read())
return etree.fromstring(raw_text)
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance.
string should be an XML capabilities document
"""
if not isinstance(st, str) and not isinstance(st, bytes):
raise ValueError("String must be of type string or bytes, not %s" % type(st))
raw_text = strip_bom(st)
return etree.fromstring(raw_text)
| {
"repo_name": "geographika/OWSLib",
"path": "owslib/map/common.py",
"copies": "5",
"size": "2971",
"license": "bsd-3-clause",
"hash": -3685629638672800000,
"line_mean": 34.369047619,
"line_max": 89,
"alpha_frac": 0.5950858297,
"autogenerated": false,
"ratio": 4.244285714285715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7339371543985715,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import click
from future import standard_library
import cleanthermostat as ct
from caar.configparser_read import THERMOSTATS_FILE, POSTAL_FILE
standard_library.install_aliases()
"""This script file creates a Python pickle file from a raw data file that
contains either operating data from thermostats or temperature data (indoor
or outdoor). The pickle file can be loaded from the same project later and
read as a Python dict (a hash file in which the indexes or keys are
combinations of ID numbers and time stamps).
Run the script from the command line. It may be necessary to add the full
path for the directory 'caar' to the PYTHONPATH first. For example:
PYTHONPATH = '/home/tl_cycling/caar'
import sys
sys.path.append(PYTHONPATH)
The columns in the input file should form an intersecting set with the
headings listed in config.ini under the [file_headers] section. The
leading column should have a thermostat or location ID in numeric form.
For example, a file containing a column that has a thermostat ID could have
the default heading for the first column as shown in the example
data/cycles.csv file, 'ThermostatId', or the heading could be just 'Id'.
The file contents and config.ini just need to match.
See the example input files in the data directory to see the general format.
The script takes a single input file as an argument, such as 'cycles.csv',
'inside.csv', or 'outside.csv'. This is the only required argument.
The first optional argument is a state abbreviation, as will be explained.
The other arguments are taken as defaults ('thermostats.csv',
'us_postal_codes.csv') unless specified with an option.
To run from the command line, the general form is:
python picklert.py [Input file] --states=[Two-letter abbreviation as string]
An example is:
python picklert.py 'cycles.csv' --states='TX'
This example is based on the assumption that only a single state is of interest.
Data from all states in the input file can be included by leaving out the --states option.
Otherwise, multiple states can be selected, such as --states='TX,IA'.
"""
@click.command()
@click.argument('rawfile')
@click.option('--picklepath', default=None,
help='Output file path (generated automatically in current directory by default).')
@click.option('--states', default=None,
help='List of state abbreviations, capitalized.')
@click.option('--thermostats', default=THERMOSTATS_FILE,
help='File for thermostat metadata.')
@click.option('--postal', default=POSTAL_FILE, help='File for postal codes.')
@click.option('--cycle', default='Cool',
help='Cool or Heat (Default: Cool).')
def picklert(rawfile, picklepath, states, thermostats, postal, cycle):
print('Raw file :', rawfile)
if picklepath is None:
picklepath = ct._pickle_filename(rawfile, states)
print('Pickle output file:', picklepath)
if states:
print('States :', states)
print('Thermostats :', thermostats)
print('Postal codes :', postal)
else:
print('All states : no states selected')
print('Cycle :', cycle)
parameters_accepted = input('Pickle: enter y to proceed')
if parameters_accepted == 'y':
kwargs = {'picklepath': picklepath, 'states': states,
'thermostats_file': thermostats,
'postal_file': postal,
'cycle': cycle}
dump_file = ct.pickle_from_file(rawfile, **kwargs)
click.echo('{} created.'.format(dump_file))
if __name__ == '__main__':
picklert()
| {
"repo_name": "nickpowersys/CaaR",
"path": "caar/picklert.py",
"copies": "1",
"size": "3677",
"license": "bsd-3-clause",
"hash": 6094675567280728000,
"line_mean": 36.1414141414,
"line_max": 97,
"alpha_frac": 0.7022028828,
"autogenerated": false,
"ratio": 3.949516648764769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5151719531564769,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import click
import os
import yaml
from bag8.exceptions import NoProjectYaml
from bag8.utils import simple_name
CURR_DIR = os.path.realpath('.')
class Yaml(object):
def __init__(self, project):
self.project = project
self._data = None
self._bag8_names = {}
def _get_customized_yml(self, project):
"""Prefixes project sections with project name, ex: pg > busyboxpg.
"""
custom_yml = {}
try:
__ = project.yaml_path # noqa
except NoProjectYaml as e:
click.echo(e.message)
return custom_yml
for k, v in yaml.load(open(project.yaml_path)).items():
            # ensure an environment section for the coming overriding
if 'environment' not in v:
v['environment'] = {}
# ensure common env section format -> list
elif isinstance(v['environment'], list):
v['environment'] = dict([l.split('=')
for l in v['environment']])
# shortcuts
name = k if k != 'app' else project.simple_name
domain_suffix = project.config.domain_suffix
domainname = v.get('domainname',
'{0}.{1}'.format(name, domain_suffix))
if 'DNSDOCK_ALIAS' not in v['environment']:
v['environment']['DNSDOCK_ALIAS'] = domainname
v['environment']['DNSDOCK_IMAGE'] = ''
# update sections
custom_yml[name] = v
links = []
for link in v.get('links', []):
try:
_, name = link.split(':', 1)
except ValueError:
name = link
links.append(name)
v['environment']['BAG8_LINKS'] = ' '.join(links)
return custom_yml
def _update_yml_dict(self, yml_dict, project):
for p in project.deps:
yml_dict = self._update_yml_dict(yml_dict, p)
yml_dict.update(self._get_customized_yml(project))
return yml_dict
def render(self):
self._data = self._update_yml_dict({}, self.project)
# ensure good app name
app = self.project.simple_name
# keep bag8 name mapping
self._bag8_names[app] = self.project.bag8_name
        # clean links according to tree-permitted names and project-accepted ones,
# ex.: dummy.js:dummyjs.docker > dummyjs:dummyjs.docker
links = []
for link in self._data[app].get('links', []):
bag8_name = link.split(':')[0]
name = simple_name(bag8_name)
self._bag8_names[name] = bag8_name
link = name + ':' + link.split(':')[1] if ':' in link else name
links.append(link)
self._data[app]['links'] = links
        # ensure an environment section for the coming overriding
if 'environment' not in self._data[app]:
self._data[app]['environment'] = {}
# ensure common env section format -> list
elif isinstance(self._data[app]['environment'], list):
self._data[app]['environment'] = \
dict([l.split('=') for l in self._data[app]['environment']])
# Setup develop mode
if self.project.develop:
for volume in self._data[app].get('dev_volumes', []):
if 'volumes' not in self._data[app]:
self._data[app]['volumes'] = []
self._data[app]['volumes'].append(volume % dict({
'PWD': CURR_DIR,
}, **os.environ))
dev_environment = self._data[app].get('dev_environment', {})
if isinstance(dev_environment, list):
dev_environment = dict([l.split('=') for l in dev_environment])
self._data[app]['environment'].update(dev_environment)
if 'dev_command' in self._data[app]:
self._data[app]['command'] = self._data[app]['dev_command']
# Clean compose extensions
for key in self._data:
for k in ['dev_command', 'dev_environment', 'dev_volumes']:
if k in self._data[key]:
del self._data[key][k]
# add dockerfile info for build
self._data[app]['dockerfile'] = os.path.join(self.project.bag8_path,
'Dockerfile')
@property
def data(self):
if not self._data:
self.render()
return self._data
@property
def service_dicts(self):
service_dicts = []
for k, v in self.data.items():
v['name'] = k
v['bag8_name'] = self._bag8_names.get(k)
service_dicts.append(v)
return service_dicts
def write(self):
# write to tmp path
with open(self.project.temp_path, 'wb') as out_yml:
out_yml.write(yaml.safe_dump(self.data))
| {
"repo_name": "novafloss/bag8",
"path": "bag8/yaml.py",
"copies": "1",
"size": "4983",
"license": "mit",
"hash": 1986384885231814700,
"line_mean": 32.4429530201,
"line_max": 79,
"alpha_frac": 0.5243829019,
"autogenerated": false,
"ratio": 4.125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00026832058127651944,
"num_lines": 149
} |
from __future__ import absolute_import, division, print_function
import click
import os
import yaml
class Config(object):
def __init__(self):
# used in projects site.conf files and hosts command
self.tmpfolder = os.path.expanduser('~/.local/bag8/')
# load config
self.config_path = os.path.expanduser('~/.config/bag8.yml')
if os.path.exists(self.config_path):
data = yaml.load(open(self.config_path))
else:
click.echo('No config found at: {0}.'.format(self.config_path))
click.echo('Loads default values.')
data = {}
self.account = data.get('account', 'bag8')
self.domain_suffix = data.get('domain_suffix', 'docker')
self.insecure_registry = data.get('insecure_registry', False)
self.prefix = data.get('prefix', 'bag8')
self.registry = data.get('registry', None)
self.docker_interface = data.get('docker_interface', 'docker0')
self.docker_ip = data.get('docker_ip', '172.17.42.1')
self._data_paths = [
'.' # current dir before all
] + data.get('data_paths', [])
self.dnsdock_image = data.get('dnsdock_image',
'tonistiigi/dnsdock:v1.10.0')
self.nameserver = data.get('nameserver', '8.8.8.8:53')
self.wait_seconds = data.get('wait_seconds', 10)
self.skip_wait = data.get('skip_wait', False)
def iter_data_paths(self):
for p in self._data_paths:
if not os.path.exists(p):
click.echo('skip path: {0}'.format(p))
continue
for d in os.listdir(p):
if not os.path.exists(os.path.join(p, d, 'fig.yml')):
continue
yield p, d
@property
def data_paths(self):
return [p for p, d in self.iter_data_paths()]
| {
"repo_name": "novafloss/bag8",
"path": "bag8/config.py",
"copies": "1",
"size": "1890",
"license": "mit",
"hash": 1011683812807282800,
"line_mean": 37.5714285714,
"line_max": 75,
"alpha_frac": 0.5597883598,
"autogenerated": false,
"ratio": 3.6416184971098264,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47014068569098266,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import codecs
import functools
import inspect
import io
import math
import os
import re
import shutil
import struct
import sys
import tempfile
from errno import ENOENT
from collections import Iterator
from contextlib import contextmanager
from importlib import import_module
from threading import Lock
import multiprocessing as mp
from . import multiprocessing
import uuid
from weakref import WeakValueDictionary
from .compatibility import (long, getargspec, BZ2File, GzipFile, LZMAFile, PY3,
urlsplit, unicode)
from .core import get_deps
from .context import _globals
from .optimize import key_split # noqa: F401
system_encoding = sys.getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
def homogeneous_deepmap(func, seq):
n = 0
tmp = seq
while isinstance(tmp, list):
n += 1
tmp = tmp[0]
return ndeepmap(n, func, seq)
def ndeepmap(n, func, seq):
""" Call a function on every element within a nested container
>>> def inc(x):
... return x + 1
>>> L = [[1, 2], [3, 4, 5]]
>>> ndeepmap(2, inc, L)
[[2, 3], [4, 5, 6]]
"""
if n == 1:
return [func(item) for item in seq]
elif n > 1:
return [ndeepmap(n - 1, func, item) for item in seq]
else:
return func(seq)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
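# Illustrative usage (not part of dask): suppress a specific exception type.
# The path below is hypothetical.
#     with ignoring(OSError):
#         os.remove('/tmp/does-not-exist')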
def import_required(mod_name, error_msg):
"""Attempt to import a required dependency.
Raises a RuntimeError if the requested module is not available.
"""
try:
return import_module(mod_name)
except ImportError:
raise RuntimeError(error_msg)
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
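# Illustrative usage (not part of dask): tmpfile yields a path that does not
# exist yet and removes whatever the block created there on exit.
#     with tmpfile('.csv') as fn:
#         ...  # write to fn, read it back; fn is cleaned up afterwards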
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def changed_cwd(new_cwd):
old_cwd = os.getcwd()
os.chdir(new_cwd)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def tmp_cwd(dir=None):
with tmpdir(dir) as dirname:
with changed_cwd(dirname):
yield dirname
@contextmanager
def noop_context():
yield
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t', use_tmpdir=True):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with (tmp_cwd() if use_tmpdir else noop_context()):
for filename, text in d.items():
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with ignoring(OSError):
os.remove(filename)
compressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}
def infer_compression(filename):
extension = os.path.splitext(filename)[-1].strip('.')
return compressions.get(extension, None)
opens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}
def open(filename, mode='rb', compression=None, **kwargs):
if compression == 'infer':
compression = infer_compression(filename)
return opens.get(compression, io.open)(filename, mode, **kwargs)
def get_bom(fn, compression=None):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, mode='rb', compression=compression) as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
    Simply doing `linesep.encode(encoding)` does not always give you
    *just* the linesep bytes; for some encodings it prefixes the
    linesep bytes with the BOM. This function ensures we just get the
    linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
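# Illustrative examples (not part of dask):
#     get_bin_linesep('utf-8', '\n')   -> b'\n'
#     get_bin_linesep('utf-16', '\n')  -> b'\n\x00'   (the 2-byte BOM is stripped)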
def textblock(filename, start, end, compression=None, encoding=system_encoding,
linesep=os.linesep, buffersize=4096):
"""Pull out a block of text from a file given start and stop bytes.
This gets data starting/ending from the next linesep delimiter. Each block
consists of bytes in the range [start,end[, i.e. the stop byte is excluded.
If `start` is 0, then `start` corresponds to the true start byte. If
`start` is greater than 0 and does not point to the beginning of a new
line, then `start` is incremented until it corresponds to the start byte of
the next line. If `end` does not point to the beginning of a new line, then
the line that begins before `end` is included in the block although its
last byte exceeds `end`.
Examples
--------
>> with open('myfile.txt', 'wb') as f:
.. f.write('123\n456\n789\nabc')
In the example below, 1 and 10 don't line up with endlines.
>> u''.join(textblock('myfile.txt', 1, 10))
'456\n789\n'
"""
# Make sure `linesep` is not a byte string because
# `io.TextIOWrapper` in Python versions other than 2.7 dislike byte
# strings for the `newline` argument.
linesep = str(linesep)
# Get byte representation of the line separator.
bin_linesep = get_bin_linesep(encoding, linesep)
bin_linesep_len = len(bin_linesep)
if buffersize < bin_linesep_len:
error = ('`buffersize` ({0:d}) must be at least as large as the '
'number of line separator bytes ({1:d}).')
raise ValueError(error.format(buffersize, bin_linesep_len))
chunksize = end - start
with open(filename, 'rb', compression) as f:
with io.BufferedReader(f) as fb:
# If `start` does not correspond to the beginning of the file, we
# need to move the file pointer to `start - len(bin_linesep)`,
# search for the position of the next a line separator, and set
# `start` to the position after that line separator.
if start > 0:
# `start` is decremented by `len(bin_linesep)` to detect the
# case where the original `start` value corresponds to the
# beginning of a line.
start = max(0, start - bin_linesep_len)
# Set the file pointer to `start`.
fb.seek(start)
# Number of bytes to shift the file pointer before reading a
# new chunk to make sure that a multi-byte line separator, that
# is split by the chunk reader, is still detected.
shift = 1 - bin_linesep_len
while True:
buf = f.read(buffersize)
if len(buf) < bin_linesep_len:
raise StopIteration
try:
# Find the position of the next line separator and add
# `len(bin_linesep)` which yields the position of the
# first byte of the next line.
start += buf.index(bin_linesep)
start += bin_linesep_len
except ValueError:
# No line separator was found in the current chunk.
# Before reading the next chunk, we move the file
# pointer back `len(bin_linesep) - 1` bytes to make
# sure that a multi-byte line separator, that may have
# been split by the chunk reader, is still detected.
start += len(buf)
start += shift
fb.seek(shift, os.SEEK_CUR)
else:
# We have found the next line separator, so we need to
# set the file pointer to the first byte of the next
# line.
fb.seek(start)
break
with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:
# Retrieve and yield lines until the file pointer reaches
# `end`.
while start < end:
line = next(fbw)
# We need to encode the line again to get the byte length
# in order to correctly update `start`.
bin_line_len = len(line.encode(encoding))
if chunksize < bin_line_len:
error = ('`chunksize` ({0:d}) is less than the line '
'length ({1:d}). This may cause duplicate '
'processing of this line. It is advised to '
'increase `chunksize`.')
raise IOError(error.format(chunksize, bin_line_len))
yield line
start += bin_line_len
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n, random_state=None):
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
Number of tuples to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
maxuint32 = np.iinfo(np.uint32).max
return [(random_state.rand(624) * maxuint32).astype('uint32')
for i in range(n)]
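# Illustrative usage (not part of dask): each returned array can seed an
# independent np.random.RandomState stream.
#     seeds = random_state_data(3, random_state=42)
#     streams = [np.random.RandomState(s) for s in seeds]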
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
    if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
elif compression:
# depending on the implementation, this may be inefficient
with open(fn, 'rb', compression) as f:
result = f.seek(0, 2)
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval,
float, format, frozenset, hash, hex, id, int, iter,
len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted,
staticmethod, str, sum, tuple,
type, vars, zip, memoryview])
if PY3:
ONE_ARITY_BUILTINS.add(ascii) # noqa: F821
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
self._lazy = {}
def register(self, type, func=None):
"""Register dispatch of `func` on arguments of type `type`"""
def wrapper(func):
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
return func
return wrapper(func) if func is not None else wrapper
def register_lazy(self, toplevel, func=None):
"""
Register a registration function which will be called if the
*toplevel* module (e.g. 'pandas') is ever loaded.
"""
def wrapper(func):
self._lazy[toplevel] = func
return func
return wrapper(func) if func is not None else wrapper
def __call__(self, arg):
# Fast path with direct lookup on type
lk = self._lookup
typ = type(arg)
try:
impl = lk[typ]
except KeyError:
pass
else:
return impl(arg)
# Is a lazy registration function present?
toplevel, _, _ = typ.__module__.partition('.')
try:
register = self._lazy.pop(toplevel)
except KeyError:
pass
else:
register()
return self(arg) # recurse
# Walk the MRO and cache the lookup result
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
lk[typ] = lk[cls]
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
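# Illustrative usage sketch (not part of dask): a Dispatch instance dispatches
# on the type of its argument. The names below (_example_typename and its
# handlers) are hypothetical and exist only for demonstration.
_example_typename = Dispatch()
@_example_typename.register(list)
def _example_typename_list(seq):
    # called for list arguments
    return 'list'
@_example_typename.register((int, float))
def _example_typename_number(x):
    # registering with a tuple covers several types at once
    return 'number'
# _example_typename([1, 2]) -> 'list'; _example_typename(3.5) -> 'number'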
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
# NumPy docstring contains cursor and comment only example
stripped = line.strip()
if stripped == '>>>' or stripped.startswith('>>> #'):
return stripped
elif '>>>' in stripped:
return line + ' # doctest: +SKIP'
else:
return line
def skip_doctest(doc):
if doc is None:
return ''
return '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
try:
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
except TypeError:
not_supported = []
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
                note = ("\n Notes\n -----\n"
                        " Dask doesn't support the following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = skip_doctest(doc)
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
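# Illustrative sketch (not part of dask): attaching the docstring of a pandas
# method to a wrapper. MyFrame and its sum signature are hypothetical, and
# pandas is assumed to be imported as pd.
#     class MyFrame(object):
#         @derived_from(pd.DataFrame)
#         def sum(self, axis=None):
#             ...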
def funcname(func):
"""Get the name of a function."""
# functools.partial
if isinstance(func, functools.partial):
return funcname(func.func)
# methodcaller
if isinstance(func, methodcaller):
return func.method
module_name = getattr(func, '__module__', None) or ''
type_name = getattr(type(func), '__name__', None) or ''
# toolz.curry
if 'toolz' in module_name and 'curry' == type_name:
return func.func_name
# multipledispatch objects
if 'multipledispatch' in module_name and 'Dispatcher' == type_name:
return func.name
# All other callables
try:
name = func.__name__
if name == '<lambda>':
return 'lambda'
return name
except:
return str(func)
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
'123'
>>> ensure_bytes('123')
'123'
>>> ensure_bytes(b'123')
'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def ensure_unicode(s):
    """ Turn string or bytes into unicode text
>>> ensure_unicode(u'123')
u'123'
>>> ensure_unicode('123')
u'123'
>>> ensure_unicode(b'123')
u'123'
"""
if isinstance(s, unicode):
return s
if hasattr(s, 'decode'):
return s.decode()
    msg = "Object %s is neither a unicode string nor has a decode method"
raise TypeError(msg % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
def build_name_function(max_int):
""" Returns a function that receives a single integer
and returns it as a string padded by enough zero characters
to align with maximum possible integer
>>> name_f = build_name_function(57)
>>> name_f(7)
'07'
>>> name_f(31)
'31'
>>> build_name_function(1000)(42)
'0042'
>>> build_name_function(999)(42)
'042'
>>> build_name_function(0)(0)
'0'
"""
    # handle corner cases where max_int is 0 or an exact power of 10
max_int += 1e-8
pad_length = int(math.ceil(math.log10(max_int)))
def name_function(i):
return str(i).zfill(pad_length)
return name_function
def infer_storage_options(urlpath, inherit_storage_options=None):
""" Infer storage options from URL path and merge it with existing storage
options.
Parameters
----------
urlpath: str or unicode
Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
    inherit_storage_options: dict (optional)
Its contents will get merged with the inferred information from the
given path
Returns
-------
Storage options dict.
Examples
--------
>>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
{"protocol": "file", "path", "/mnt/datasets/test.csv"}
>>> infer_storage_options(
... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
... inherit_storage_options={'extra': 'value'}) # doctest: +SKIP
{"protocol": "hdfs", "username": "username", "password": "pwd",
"host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
"url_query": "q=1", "extra": "value"}
"""
# Handle Windows paths including disk name in this special case
if re.match(r'^[a-zA-Z]:[\\/]', urlpath):
return {'protocol': 'file',
'path': urlpath}
parsed_path = urlsplit(urlpath)
protocol = parsed_path.scheme or 'file'
path = parsed_path.path
if protocol == 'file':
# Special case parsing file protocol URL on Windows according to:
# https://msdn.microsoft.com/en-us/library/jj710207.aspx
windows_path = re.match(r'^/([a-zA-Z])[:|]([\\/].*)$', path)
if windows_path:
path = '%s:%s' % windows_path.groups()
inferred_storage_options = {
'protocol': protocol,
'path': path,
}
if parsed_path.netloc:
# Parse `hostname` from netloc manually because `parsed_path.hostname`
# lowercases the hostname which is not always desirable (e.g. in S3):
# https://github.com/dask/dask/issues/1417
inferred_storage_options['host'] = parsed_path.netloc.rsplit('@', 1)[-1].rsplit(':', 1)[0]
if parsed_path.port:
inferred_storage_options['port'] = parsed_path.port
if parsed_path.username:
inferred_storage_options['username'] = parsed_path.username
if parsed_path.password:
inferred_storage_options['password'] = parsed_path.password
if parsed_path.query:
inferred_storage_options['url_query'] = parsed_path.query
if parsed_path.fragment:
inferred_storage_options['url_fragment'] = parsed_path.fragment
if inherit_storage_options:
if set(inherit_storage_options) & set(inferred_storage_options):
raise KeyError("storage options (%r) and path url options (%r) "
"collision is detected"
% (inherit_storage_options, inferred_storage_options))
inferred_storage_options.update(inherit_storage_options)
return inferred_storage_options
def dependency_depth(dsk):
import toolz
deps, _ = get_deps(dsk)
@toolz.memoize
def max_depth_by_deps(key):
if not deps[key]:
return 1
d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])
return d
return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())
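# Illustrative sketch (not part of the original module): `dependency_depth` on
# a tiny hand-written graph using the usual ``{key: (func, *dep_keys)}`` task
# convention; the graph below is made up for demonstration.
def _example_dependency_depth():
    from operator import add, neg
    dsk = {'a': 1,
           'b': (neg, 'a'),
           'c': (add, 'a', 'b')}
    # 'a' sits at depth 1, 'b' at depth 2 and 'c' at depth 3.
    return dependency_depth(dsk)  # -> 3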
def eq_strict(a, b):
"""Returns True if both values have the same type and are equal."""
if type(a) is type(b):
return a == b
return False
def memory_repr(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
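# Illustrative sketch (not part of the original module): `memory_repr` divides
# by 1024 until the value drops below the next unit boundary.
def _example_memory_repr():
    assert memory_repr(100) == '100.0 bytes'
    assert memory_repr(2048) == '2.0 KB'
    assert memory_repr(3 * 1024 ** 3) == '3.0 GB'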
def put_lines(buf, lines):
if any(not isinstance(x, unicode) for x in lines):
lines = [unicode(x) for x in lines]
buf.write('\n'.join(lines))
_method_cache = {}
class methodcaller(object):
"""Return a callable object that calls the given method on its operand.
Unlike the builtin `methodcaller`, this class is serializable"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args):
return self.lock.acquire(*args)
def release(self, *args):
return self.lock.release(*args)
def __enter__(self):
self.lock.__enter__()
def __exit__(self, *args):
self.lock.__exit__(*args)
@property
def locked(self):
        return self.lock.locked()
def __getstate__(self):
return self.token
def __setstate__(self, token):
self.__init__(token)
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.token)
__repr__ = __str__
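# Illustrative sketch (not part of the original module): copies unpickled from
# the same SerializableLock share one underlying lock within a process, so a
# non-blocking acquire on the second copy fails while the first holds it.
def _example_serializable_lock():
    import pickle
    lock = SerializableLock()
    a = pickle.loads(pickle.dumps(lock))
    b = pickle.loads(pickle.dumps(lock))
    assert a.lock is b.lock              # same per-process lock object
    a.acquire()
    try:
        assert not b.acquire(False)      # already held via the shared lock
    finally:
        a.release()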
def effective_get(get=None, collection=None):
"""Get the effective get method used in a given situation"""
collection_get = collection._default_get if collection else None
return get or _globals.get('get') or collection_get
def get_scheduler_lock(get=None, collection=None):
"""Get an instance of the appropriate lock for a certain situation based on
scheduler used."""
actual_get = effective_get(get, collection)
if actual_get == multiprocessing.get:
return mp.Manager().Lock()
return SerializableLock()
| {
"repo_name": "chrisbarber/dask",
"path": "dask/utils.py",
"copies": "1",
"size": "30069",
"license": "bsd-3-clause",
"hash": 5544105893736050000,
"line_mean": 27.9682080925,
"line_max": 98,
"alpha_frac": 0.5690245768,
"autogenerated": false,
"ratio": 3.9424413268650844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003172354415752501,
"num_lines": 1038
} |
from __future__ import absolute_import, division, print_function
import codecs
import grp
import json
import logging
import os
import pwd
# from typing import Dict, Union
import click
import jinja2.sandbox
import credsmash.api
from .util import read_many, read_many_str, minjson, envfile_quote, shell_quote, parse_manifest, detect_format, ItemNotFound
from .cli import main
logger = logging.getLogger(__name__)
class CredsmashProxy(object):
def __init__(self, key_service, storage_service, key_fmt, encoding='utf-8', errors='strict'):
self._key_service = key_service
self._storage_service = storage_service
self._key_fmt = key_fmt
self._data = {}
self._encoding = encoding
self._errors = errors
def __getitem__(self, key):
# type: (str) -> str
return self.get_str(key)
def get_str(self, key):
# type: (str) -> str
b = self.get_bytes(key)
return b.decode(self._encoding, self._errors)
def get_bytes(self, key):
# type: (str) -> bytes
if key in self._data:
return self._data[key]
if isinstance(key, tuple):
lookup_key = self._key_fmt.format(*key)
else:
lookup_key = self._key_fmt.format(key)
logger.debug('key=%s lookup_key=%s', key, lookup_key)
try:
res = credsmash.api.get_secret(
self._storage_service,
self._key_service,
                lookup_key,  # use the formatted key so --key-fmt takes effect
)
except ItemNotFound:
raise KeyError(repr(key))
self._data[key] = res
return res
def __contains__(self, key):
try:
self.get_bytes(key)
return True
except KeyError:
return False
class DictProxy(object):
def __init__(self, items, key_fmt, encoding='utf-8', errors='strict'):
self._items = items # type: Dict[str,bytes]
self._key_fmt = key_fmt
self._encoding = encoding
self._errors = errors
def __getitem__(self, key):
# type: (str) -> str
return self.get_str(key)
def get_str(self, key):
# type: (str) -> str
b = self.get_bytes(key)
return b.decode(self._encoding, self._errors)
def get_bytes(self, key):
# type: (str) -> bytes
if isinstance(key, tuple):
lookup_key = self._key_fmt.format(*key)
else:
lookup_key = self._key_fmt.format(key)
return self._items[lookup_key]
def __contains__(self, key):
try:
self.get_bytes(key)
return True
except KeyError:
return False
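# Illustrative sketch (not part of the original module): how ``key_fmt``
# reshapes lookups in DictProxy; the secret names and values are made up.
def _example_dict_proxy():
    items = {'dev.db_password': b'hunter2'}
    secrets = DictProxy(items, key_fmt='dev.{0}')
    assert secrets.get_bytes('db_password') == b'hunter2'
    assert secrets['db_password'] == 'hunter2'   # decoded with utf-8
    assert 'missing' not in secrets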
@main.command('render-template')
@click.argument('template', type=click.File(mode='r', encoding='utf-8'))
@click.argument('destination', type=click.File(mode='w', encoding='utf-8'))
@click.option('--obj-name', default='secrets',
help='The variable/object name provided to the template')
@click.option('--key-fmt', default='{0}',
help='Re-use templates by tweaking which variable it maps to- '
'eg, "dev.{0}" converts {{secrets.potato}} to the secret "dev.potato"')
@click.option('--template-vars', type=click.File(mode='r', encoding='utf-8'))
@click.option('--template-vars-format', default=None)
@click.option('--secrets-file', type=click.File(mode='rb'),
help="Source from a local file instead of credential store "
"(useful for caching/testing)")
@click.option('--secrets-file-format', default=None)
@click.pass_context
def cmd_render_template(
ctx, template, destination,
obj_name='secrets', key_fmt='{0}',
template_vars=None, template_vars_format=None,
secrets_file=None, secrets_file_format=None
):
"""
Render a configuration template....
"""
if secrets_file:
if not secrets_file_format:
secrets_file_format = detect_format(secrets_file, 'json')
local_secrets = read_many(secrets_file, secrets_file_format)
secrets = DictProxy(local_secrets, key_fmt)
else:
secrets = CredsmashProxy(
ctx.obj.key_service,
ctx.obj.storage_service,
key_fmt,
)
render_args = {} # type: Dict[str,Union[str,DictProxy,CredsmashProxy]]
if template_vars:
if not template_vars_format:
template_vars_format = detect_format(template_vars, 'json')
render_args = read_many_str(template_vars, template_vars_format)
if obj_name in render_args:
logger.warning('Overwrote %r from template vars with secrets var.', obj_name)
render_args[obj_name] = secrets
env = _make_env()
output = env.from_string(template.read()).render(render_args)
destination.write(output)
@main.command('render-templates')
@click.argument('manifest', type=click.File(mode='r', encoding='utf-8'))
@click.option('--manifest-format', default=None)
@click.option('--obj-name', default='secrets',
help='The variable/object name provided to the template')
@click.option('--key-fmt', default='{0}',
help='Re-use templates by tweaking which variable it maps to- '
'eg, "dev.{0}" converts {{secrets.potato}} to the secret "dev.potato"')
@click.option('--template-vars', type=click.File(mode='r', encoding='utf-8'))
@click.option('--template-vars-format', default=None)
@click.option('--secrets-file', type=click.File(mode='rb'),
help="Source from a local file instead of credential store "
"(useful for caching/testing)")
@click.option('--secrets-file-format', default=None)
@click.pass_context
def cmd_render_templates(
ctx, manifest, manifest_format=None,
obj_name='secrets', key_fmt='{0}',
template_vars=None, template_vars_format=None,
secrets_file=None, secrets_file_format=None
):
"""
Render multiple configuration templates - reads from a manifest file.
"""
if secrets_file:
if not secrets_file_format:
secrets_file_format = detect_format(secrets_file, 'json')
local_secrets = read_many(secrets_file, secrets_file_format)
secrets = DictProxy(local_secrets, key_fmt)
else:
secrets = CredsmashProxy(
ctx.obj.key_service,
ctx.obj.storage_service,
key_fmt,
)
render_args = {} # type: Dict[str,Union[str,DictProxy,CredsmashProxy]]
if template_vars:
if not template_vars_format:
template_vars_format = detect_format(template_vars, 'json')
render_args = read_many_str(template_vars, template_vars_format)
if obj_name in render_args:
logger.warning('Overwrote %r from template vars with secrets var.', obj_name)
render_args[obj_name] = secrets
env = _make_env()
if not manifest_format:
manifest_format = detect_format(manifest, 'json')
for entry in parse_manifest(manifest, manifest_format):
destination_path = os.path.realpath(entry['destination'])
if 'source' in entry:
source_path = os.path.realpath(entry['source'])
with codecs.open(source_path, 'r', encoding='utf-8') as template:
output = env.from_string(template.read()).render(render_args)
# Only open the file after rendering the template
# as we truncate the file when opening.
with codecs.open(destination_path, 'w', encoding='utf-8') as destination:
destination.write(output)
logger.info('Rendered template="%s" destination="%s"', entry['source'], entry['destination'])
elif 'secret' in entry:
output = secrets.get_bytes(entry['secret'])
with open(destination_path, 'wb') as destination:
destination.write(output)
logger.info('Wrote secret="%s" destination="%s"', entry['secret'], entry['destination'])
else:
raise RuntimeError('Manifest entry must contain a secret or source')
if 'mode' in entry:
os.chmod(
destination_path,
entry['mode']
)
if 'owner' in entry and 'group' in entry:
os.chown(
destination_path,
pwd.getpwnam(entry['owner']).pw_uid,
grp.getgrnam(entry['group']).gr_gid
)
def _make_env():
env = jinja2.sandbox.SandboxedEnvironment(
trim_blocks=True,
lstrip_blocks=True,
autoescape=False,
)
env.filters['sh'] = shell_quote
env.filters['jsonify'] = json.dumps
env.filters['minjson'] = minjson
env.filters['env'] = envfile_quote
return env
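# Illustrative sketch (not part of the original module): rendering a small
# inline template through the sandboxed environment built above. The template
# text and variables are invented; only the ``jsonify`` filter (plain
# ``json.dumps``) is exercised here.
def _example_render():
    env = _make_env()
    template = env.from_string('DB_CONFIG={{ config | jsonify }}')
    return template.render(config={'host': 'localhost', 'port': 5432})
    # -> 'DB_CONFIG={"host": "localhost", "port": 5432}'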
| {
"repo_name": "3stack-software/credsmash",
"path": "credsmash/templates.py",
"copies": "1",
"size": "8699",
"license": "apache-2.0",
"hash": -2841289603700595000,
"line_mean": 35.0954356846,
"line_max": 124,
"alpha_frac": 0.6021381768,
"autogenerated": false,
"ratio": 3.8355379188712524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49376760956712523,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import codecs
import os
import platform
import re
import sys
from distutils.command.build import build
from distutils.command.build_clib import build_clib
from distutils.errors import DistutilsSetupError
from setuptools import find_packages, setup
from setuptools.command.install import install
###############################################################################
NAME = "argon2-cffi"
PACKAGES = find_packages(where="src")
use_sse2 = os.environ.get("ARGON2_CFFI_USE_SSE2", None)
if use_sse2 == "1":
optimized = True
elif use_sse2 == "0":
optimized = False
else:
# Optimized version requires SSE2 extensions. They have been around since
# 2001 so we try to compile it on every recent-ish x86.
optimized = platform.machine() in ("i686", "x86", "x86_64", "AMD64")
CFFI_MODULES = ["src/argon2/_ffi_build.py:ffi"]
lib_base = os.path.join("extras", "libargon2", "src")
include_dirs = [
os.path.join(lib_base, "..", "include"),
os.path.join(lib_base, "blake2"),
]
sources = [
os.path.join(lib_base, path)
for path in [
"argon2.c",
os.path.join("blake2", "blake2b.c"),
"core.c",
"encoding.c",
"opt.c" if optimized else "ref.c",
"thread.c",
]
]
# Add vendored integer types headers if necessary.
windows = "win32" in str(sys.platform).lower()
if windows:
int_base = "extras/msinttypes/"
inttypes = int_base + "inttypes"
stdint = int_base + "stdint"
vi = sys.version_info[0:2]
if vi in [(2, 6), (2, 7)]:
# VS 2008 needs both.
include_dirs += [inttypes, stdint]
elif vi in [(3, 3), (3, 4)]:
# VS 2010 needs inttypes.h and fails with both.
include_dirs += [inttypes]
LIBRARIES = [("argon2", {"include_dirs": include_dirs, "sources": sources})]
META_PATH = os.path.join("src", "argon2", "__init__.py")
KEYWORDS = ["password", "hash", "hashing", "security"]
PROJECT_URLS = {
"Documentation": "https://argon2-cffi.readthedocs.io/",
"Bug Tracker": "https://github.com/hynek/argon2-cffi/issues",
"Source Code": "https://github.com/hynek/argon2-cffi",
}
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python",
"Topic :: Security :: Cryptography",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
]
SETUP_REQUIRES = ["cffi"]
if windows and sys.version_info[0] == 2:
# required for "Microsoft Visual C++ Compiler for Python 2.7"
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
SETUP_REQUIRES.append("setuptools>=6.0")
INSTALL_REQUIRES = ["cffi>=1.0.0", "six", "enum34; python_version<'3.4'"]
EXTRAS_REQUIRE = {
"docs": ["sphinx"],
"tests": ["coverage[toml]>=5.0.2", "hypothesis", "pytest"],
}
EXTRAS_REQUIRE["dev"] = (
EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["docs"] + ["wheel", "pre-commit"]
)
###############################################################################
def keywords_with_side_effects(argv):
"""
Get a dictionary with setup keywords that (can) have side effects.
:param argv: A list of strings with command line arguments.
:returns: A dictionary with keyword arguments for the ``setup()`` function.
This setup.py script uses the setuptools 'setup_requires' feature
because this is required by the cffi package to compile extension
modules. The purpose of ``keywords_with_side_effects()`` is to avoid
triggering the cffi build process as a result of setup.py invocations
that don't need the cffi module to be built (setup.py serves the dual
purpose of exposing package metadata).
Stolen from pyca/cryptography.
"""
no_setup_requires_arguments = (
"-h",
"--help",
"-n",
"--dry-run",
"-q",
"--quiet",
"-v",
"--verbose",
"-V",
"--version",
"--author",
"--author-email",
"--classifiers",
"--contact",
"--contact-email",
"--description",
"--egg-base",
"--fullname",
"--help-commands",
"--keywords",
"--licence",
"--license",
"--long-description",
"--maintainer",
"--maintainer-email",
"--name",
"--no-user-cfg",
"--obsoletes",
"--platforms",
"--provides",
"--requires",
"--url",
"clean",
"egg_info",
"register",
"sdist",
"upload",
)
def is_short_option(argument):
"""Check whether a command line argument is a short option."""
return len(argument) >= 2 and argument[0] == "-" and argument[1] != "-"
def expand_short_options(argument):
"""Expand combined short options into canonical short options."""
return ("-" + char for char in argument[1:])
def argument_without_setup_requirements(argv, i):
"""Check whether a command line argument needs setup requirements."""
if argv[i] in no_setup_requires_arguments:
# Simple case: An argument which is either an option or a command
# which doesn't need setup requirements.
return True
elif is_short_option(argv[i]) and all(
option in no_setup_requires_arguments
for option in expand_short_options(argv[i])
):
# Not so simple case: Combined short options none of which need
# setup requirements.
return True
elif argv[i - 1 : i] == ["--egg-base"]:
            # Tricky case: --egg-base takes an argument which should not make
# us use setup_requires (defeating the purpose of this code).
return True
else:
return False
if all(
argument_without_setup_requirements(argv, i)
for i in range(1, len(argv))
):
return {"cmdclass": {"build": DummyBuild, "install": DummyInstall}}
else:
use_system_argon2 = (
os.environ.get("ARGON2_CFFI_USE_SYSTEM", "0") == "1"
)
if use_system_argon2:
disable_subcommand(build, "build_clib")
cmdclass = {"build_clib": BuildCLibWithCompilerFlags}
if BDistWheel is not None:
cmdclass["bdist_wheel"] = BDistWheel
return {
"setup_requires": SETUP_REQUIRES,
"cffi_modules": CFFI_MODULES,
"libraries": LIBRARIES,
"cmdclass": cmdclass,
}
def disable_subcommand(command, subcommand_name):
for name, method in command.sub_commands:
if name == subcommand_name:
command.sub_commands.remove((subcommand_name, method))
break
setup_requires_error = (
"Requested setup command that needs 'setup_requires' while command line "
"arguments implied a side effect free command or option."
)
class DummyBuild(build):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py build`` as
one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
class DummyInstall(install):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py install``
as one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
VERSION = find_meta("version")
URL = find_meta("url")
LONG = (
read("README.rst")
+ "\n\n"
+ "Release Information\n"
+ "===================\n\n"
+ re.search(
r"(\d+.\d.\d \(.*?\)\r?\n.*?)\r?\n\r?\n\r?\n----\r?\n\r?\n\r?\n",
read("CHANGELOG.rst"),
re.S,
).group(1)
+ "\n\n`Full changelog "
+ "<{url}en/stable/changelog.html>`_.\n\n".format(url=URL)
+ read("AUTHORS.rst")
)
class BuildCLibWithCompilerFlags(build_clib):
"""
We need to pass ``-msse2`` for the optimized build.
"""
def build_libraries(self, libraries):
"""
Mostly copy pasta from ``distutils.command.build_clib``.
"""
for (lib_name, build_info) in libraries:
sources = build_info.get("sources")
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name
)
sources = list(sources)
print("building '%s' library" % (lib_name,))
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get("macros")
include_dirs = build_info.get("include_dirs")
objects = self.compiler.compile(
sources,
extra_preargs=["-msse2"] if optimized and not windows else [],
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
objects, lib_name, output_dir=self.build_clib, debug=self.debug
)
if (
sys.platform != "win32"
and sys.version_info > (3,)
and platform.python_implementation() == "CPython"
):
try:
import wheel.bdist_wheel
except ImportError:
BDistWheel = None
else:
class BDistWheel(wheel.bdist_wheel.bdist_wheel):
def finalize_options(self):
self.py_limited_api = "cp3{}".format(sys.version_info[1])
wheel.bdist_wheel.bdist_wheel.finalize_options(self)
else:
BDistWheel = None
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URL,
project_urls=PROJECT_URLS,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
long_description=LONG,
long_description_content_type="text/x-rst",
keywords=KEYWORDS,
packages=PACKAGES,
package_dir={"": "src"},
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
# CFFI
zip_safe=False,
ext_package="argon2",
**keywords_with_side_effects(sys.argv)
)
| {
"repo_name": "hynek/argon2_cffi",
"path": "setup.py",
"copies": "1",
"size": "12414",
"license": "mit",
"hash": 8916101834499119000,
"line_mean": 30.9948453608,
"line_max": 79,
"alpha_frac": 0.5807153214,
"autogenerated": false,
"ratio": 3.837403400309119,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.991785025091874,
"avg_score": 0.000053694158075601374,
"num_lines": 388
} |
from __future__ import (absolute_import, division, print_function)
import codecs
import os
from setuptools import find_packages, setup
import versioneer
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return codecs.open(os.path.join(here, *parts), 'r').read()
long_description = read('README.md')
# Dependencies.
with open('requirements.txt') as f:
requirements = f.readlines()
install_requires = [t.strip() for t in requirements]
def walk_subpkg(name):
data_files = []
package_dir = 'yodatools'
for parent, dirs, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for f in files:
data_files.append(os.path.join(sub_dir, f))
return data_files
pkg_data = {
'': walk_subpkg('dataloader/templates') + walk_subpkg('yodaparser')
}
setup(
name='YODA-Tools',
version=versioneer.get_version(),
author='Stephanie Reeder',
author_email='[email protected]',
description='Tools to validate and manage YODA files',
long_description=long_description,
packages=find_packages(),
package_data=pkg_data,
entry_points="""
[console_scripts]
yodatools=yodatools.dataloader.controller.Main:main
""",
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules'
],
cmdclass=versioneer.get_cmdclass(),
)
| {
"repo_name": "ODM2/YODA-Tools",
"path": "setup.py",
"copies": "2",
"size": "2003",
"license": "bsd-3-clause",
"hash": -4946691523238783,
"line_mean": 27.2112676056,
"line_max": 72,
"alpha_frac": 0.6440339491,
"autogenerated": false,
"ratio": 3.628623188405797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5272657137505797,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import ssl
import sys
from tornado.escape import to_unicode
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.concurrent import Future
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler, RedirectHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest, skipBefore35, exec_test
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
self.set_status(204)
self.finish()
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@asynchronous
def get(self):
if self.request.version.startswith('HTTP/1'):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.detach()
stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
else:
self.finish('HTTP/1 required')
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
url("/redirect", RedirectHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient() is
SimpleAsyncHTTPClient())
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient() is not
SimpleAsyncHTTPClient(force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
client1 = self.io_loop.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
client2 = io_loop2.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
self.assertTrue(client1 is not client2)
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
# Chunked encoding bypasses the MIN_LENGTH check.
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_connect_timeout(self):
timeout = 0.1
timeout_min, timeout_max = 0.099, 1.0
class TimeoutResolver(Resolver):
def resolve(self, *args, **kwargs):
return Future() # never completes
with closing(self.create_client(resolver=TimeoutResolver())) as client:
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=timeout)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(timeout_min < response.request_time < timeout_max,
response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout while connecting")
@skipOnTravis
def test_request_timeout(self):
timeout = 0.1
timeout_min, timeout_max = 0.099, 0.15
if os.name == 'nt':
timeout = 0.5
timeout_min, timeout_max = 0.4, 0.6
response = self.fetch('/trigger?wake=false', request_timeout=timeout)
self.assertEqual(response.code, 599)
self.assertTrue(timeout_min < response.request_time < timeout_max,
response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout during request")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status shouldn't have a content-length
#
# Tests with a content-length header are included below
# in HTTP204NoContentTestCase.
self.assertNotIn("Content-Length", response.headers)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout in request queue")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
if response.body == b"HTTP/1 required":
self.skipTest("requires HTTP/1.x")
else:
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_chunked(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
await gen.Task(IOLoop.current().add_callback)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"])
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_content_length(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
await gen.Task(IOLoop.current().add_callback)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"],
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
def test_streaming_follow_redirects(self):
# When following redirects, header and streaming callbacks
# should only be called for the final result.
# TODO(bdarnell): this test belongs in httpclient_test instead of
# simple_httpclient_test, but it fails with the version of libcurl
# available on travis-ci. Move it when that has been upgraded
# or we have a better framework to skip tests based on curl version.
headers = []
chunks = []
self.fetch("/redirect?url=/hello",
header_callback=headers.append,
streaming_callback=chunks.append)
chunks = list(map(to_unicode, chunks))
self.assertEqual(chunks, ['Hello world!'])
# Make sure we only got one set of headers.
num_start_lines = len([h for h in headers if h.startswith("HTTP/")])
self.assertEqual(num_start_lines, 1)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True, **kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
def test_ssl_options(self):
resp = self.fetch("/hello", ssl_options={})
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_context(self):
resp = self.fetch("/hello",
ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_options_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception",
required=False):
resp = self.fetch(
"/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED))
self.assertRaises(ssl.SSLError, resp.rethrow)
def test_ssl_context_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
resp = self.fetch("/hello", ssl_options=ctx)
self.assertRaises(ssl.SSLError, resp.rethrow)
def test_error_logging(self):
# No stack traces are logged for SSL errors (in this case,
# failure to validate the testing self-signed cert).
# The SSLError is exposed through ssl.SSLError.
with ExpectLog(gen_log, '.*') as expect_log:
response = self.fetch("/", validate_cert=True)
self.assertEqual(response.code, 599)
self.assertIsInstance(response.error, ssl.SSLError)
self.assertFalse(expect_log.logged_stack)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
# Close the request cleanly in HTTP/2; it will be skipped anyway.
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
        # A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). We simulate here a
# server that sends no content length and does not close the connection.
#
# Tests of a 204 response with no Content-Length header are included
# in SimpleHTTPClientTestMixin.
stream = request.connection.detach()
stream.write(b"HTTP/1.1 204 No content\r\n")
if request.arguments.get("error", [False])[-1]:
stream.write(b"Content-Length: 5\r\n")
else:
stream.write(b"Content-Length: 0\r\n")
stream.write(b"\r\n")
stream.close()
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
def test_204_invalid_content_length(self):
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, ".*Response with code 204 should not have body"):
response = self.fetch("/?error=1")
if not self.http1:
self.skipTest("requires HTTP/1.x")
if self.http_client.configured_class != SimpleAsyncHTTPClient:
self.skipTest("curl client accepts invalid headers")
self.assertEqual(response.code, 599)
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBodySizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 64)
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/small', SmallBody),
('/large', LargeBody)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_body_size=1024 * 64)
def test_small_body(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 64)
def test_large_body(self):
with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBufferSizeTest(AsyncHTTPTestCase):
def get_app(self):
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/large', LargeBody)])
def get_http_client(self):
# 100KB body with 64KB buffer
return SimpleAsyncHTTPClient(max_body_size=1024 * 100, max_buffer_size=1024 * 64)
def test_large_body(self):
response = self.fetch('/large')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 100)
class ChunkedWithContentLengthTest(AsyncHTTPTestCase):
def get_app(self):
class ChunkedWithContentLength(RequestHandler):
def get(self):
# Add an invalid Transfer-Encoding to the response
self.set_header('Transfer-Encoding', 'chunked')
self.write("Hello world")
return Application([('/chunkwithcl', ChunkedWithContentLength)])
def get_http_client(self):
return SimpleAsyncHTTPClient()
def test_chunked_with_content_length(self):
# Make sure the invalid headers are detected
with ExpectLog(gen_log, ("Malformed HTTP message from None: Response "
"with both Transfer-Encoding and Content-Length")):
response = self.fetch('/chunkwithcl')
self.assertEqual(response.code, 599)
| {
"repo_name": "SuminAndrew/tornado",
"path": "tornado/test/simple_httpclient_test.py",
"copies": "1",
"size": "30620",
"license": "apache-2.0",
"hash": 3493872818087759400,
"line_mean": 38.5607235142,
"line_max": 107,
"alpha_frac": 0.6159046375,
"autogenerated": false,
"ratio": 4.177353342428376,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002974025596435202,
"num_lines": 774
} |
from __future__ import absolute_import, division, print_function
import collections
from functools import wraps
import inspect
import numpy as np
try:
import scipy
import scipy.fftpack
except ImportError:
scipy = None
from .creation import linspace as _linspace
from .core import (
concatenate as _concatenate,
normalize_chunks as _normalize_chunks,
)
chunk_error = ("Dask array only supports taking an FFT along an axis that \n"
"has a single chunk. An FFT operation was tried on axis %s \n"
"which has chunks %s. To change the array's chunks use "
"dask.Array.rechunk.")
fft_preamble = """
Wrapping of %s
    The axis along which the FFT is applied must have only one chunk. To change
the array's chunking use dask.Array.rechunk.
The %s docstring follows below:
"""
def _fft_out_chunks(a, s, axes):
""" For computing the output chunks of [i]fft*"""
if s is None:
return a.chunks
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _rfft_out_chunks(a, s, axes):
""" For computing the output chunks of rfft*"""
if s is None:
s = [a.chunks[axis][0] for axis in axes]
s = list(s)
s[-1] = s[-1] // 2 + 1
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _irfft_out_chunks(a, s, axes):
""" For computing the output chunks of irfft*"""
if s is None:
s = [a.chunks[axis][0] for axis in axes]
s[-1] = 2 * (s[-1] - 1)
chunks = list(a.chunks)
for i, axis in enumerate(axes):
chunks[axis] = (s[i],)
return chunks
def _hfft_out_chunks(a, s, axes):
assert len(axes) == 1
axis = axes[0]
if s is None:
s = [2 * (a.chunks[axis][0] - 1)]
n = s[0]
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _ihfft_out_chunks(a, s, axes):
assert len(axes) == 1
axis = axes[0]
if s is None:
s = [a.chunks[axis][0]]
else:
assert len(s) == 1
n = s[0]
chunks = list(a.chunks)
if n % 2 == 0:
m = (n // 2) + 1
else:
m = (n + 1) // 2
chunks[axis] = (m,)
return chunks
_out_chunk_fns = {'fft': _fft_out_chunks,
'ifft': _fft_out_chunks,
'rfft': _rfft_out_chunks,
'irfft': _irfft_out_chunks,
'hfft': _hfft_out_chunks,
'ihfft': _ihfft_out_chunks}
def fft_wrap(fft_func, kind=None, dtype=None):
""" Wrap 1D complex FFT functions
Takes a function that behaves like ``numpy.fft`` functions and
a specified kind to match it to that are named after the functions
in the ``numpy.fft`` API.
Supported kinds include:
* fft
* ifft
* rfft
* irfft
* hfft
* ihfft
Examples
--------
>>> parallel_fft = fft_wrap(np.fft.fft)
>>> parallel_ifft = fft_wrap(np.fft.ifft)
"""
if scipy is not None:
if fft_func is scipy.fftpack.rfft:
raise ValueError("SciPy's `rfft` doesn't match the NumPy API.")
elif fft_func is scipy.fftpack.irfft:
raise ValueError("SciPy's `irfft` doesn't match the NumPy API.")
if kind is None:
kind = fft_func.__name__
try:
out_chunk_fn = _out_chunk_fns[kind.rstrip("2n")]
except KeyError:
raise ValueError("Given unknown `kind` %s." % kind)
def func(a, s=None, axes=None):
if axes is None:
if kind.endswith('2'):
axes = (-2, -1)
elif kind.endswith('n'):
if s is None:
axes = tuple(range(a.ndim))
else:
axes = tuple(range(len(s)))
else:
axes = (-1,)
else:
if len(set(axes)) < len(axes):
raise ValueError("Duplicate axes not allowed.")
_dtype = dtype
if _dtype is None:
_dtype = fft_func(np.ones(len(axes) * (8,),
dtype=a.dtype)).dtype
for each_axis in axes:
if len(a.chunks[each_axis]) != 1:
raise ValueError(chunk_error % (each_axis, a.chunks[each_axis]))
chunks = out_chunk_fn(a, s, axes)
args = (s, axes)
if kind.endswith('fft'):
axis = None if axes is None else axes[0]
n = None if s is None else s[0]
args = (n, axis)
return a.map_blocks(fft_func, *args, dtype=_dtype,
chunks=chunks)
if kind.endswith('fft'):
_func = func
def func(a, n=None, axis=None):
s = None
if n is not None:
s = (n,)
axes = None
if axis is not None:
axes = (axis,)
return _func(a, s, axes)
func_mod = inspect.getmodule(fft_func)
func_name = fft_func.__name__
func_fullname = func_mod.__name__ + "." + func_name
if fft_func.__doc__ is not None:
func.__doc__ = (fft_preamble % (2 * (func_fullname,)))
func.__doc__ += fft_func.__doc__
func.__name__ = func_name
return func
fft = fft_wrap(np.fft.fft, dtype=np.complex_)
fft2 = fft_wrap(np.fft.fft2, dtype=np.complex_)
fftn = fft_wrap(np.fft.fftn, dtype=np.complex_)
ifft = fft_wrap(np.fft.ifft, dtype=np.complex_)
ifft2 = fft_wrap(np.fft.ifft2, dtype=np.complex_)
ifftn = fft_wrap(np.fft.ifftn, dtype=np.complex_)
rfft = fft_wrap(np.fft.rfft, dtype=np.complex_)
rfft2 = fft_wrap(np.fft.rfft2, dtype=np.complex_)
rfftn = fft_wrap(np.fft.rfftn, dtype=np.complex_)
irfft = fft_wrap(np.fft.irfft, dtype=np.float_)
irfft2 = fft_wrap(np.fft.irfft2, dtype=np.float_)
irfftn = fft_wrap(np.fft.irfftn, dtype=np.float_)
hfft = fft_wrap(np.fft.hfft, dtype=np.float_)
ihfft = fft_wrap(np.fft.ihfft, dtype=np.complex_)
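# Illustrative usage sketch (added; not part of the original module). The
# wrapped functions require exactly one chunk along every transformed axis,
# so an input may need a rechunk first. Shapes and chunk sizes below are
# arbitrary examples.
def _example_fft_usage():
    import dask.array as da
    x = da.ones((1024, 256), chunks=(1024, 64))
    y = fft(x, axis=0)                       # fine: axis 0 is a single chunk
    z = fft(x.rechunk((256, 256)), axis=1)   # rechunk before transforming axis 1
    return y, z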
def _fftfreq_helper(n, d=1.0, chunks=None, real=False):
n = int(n)
l = n + 1
s = n // 2 + 1 if real else n
t = l - s
chunks = _normalize_chunks(chunks, (s,))[0] + (t,)
r = _linspace(0, 1, l, chunks=chunks)
if real:
n_2 = n // 2 + 1
r = r[:n_2]
else:
n_2 = (n + 1) // 2
r = _concatenate([r[:n_2], r[n_2:-1] - 1])
r /= d
return r
@wraps(np.fft.fftfreq)
def fftfreq(n, d=1.0, chunks=None):
return _fftfreq_helper(n, d=d, chunks=chunks, real=False)
@wraps(np.fft.rfftfreq)
def rfftfreq(n, d=1.0, chunks=None):
return _fftfreq_helper(n, d=d, chunks=chunks, real=True)
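# Sanity-check sketch (added): once computed these should agree with their
# NumPy counterparts, e.g. fftfreq(8, d=0.5, chunks=4) evaluates to
# [0., 0.25, 0.5, 0.75, -1., -0.75, -0.5, -0.25], matching np.fft.fftfreq(8, 0.5).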
def _fftshift_helper(x, axes=None, inverse=False):
if axes is None:
axes = list(range(x.ndim))
elif not isinstance(axes, collections.Sequence):
axes = (axes,)
y = x
for i in axes:
n = y.shape[i]
n_2 = (n + int(inverse is False)) // 2
l = y.ndim * [slice(None)]
l[i] = slice(None, n_2)
l = tuple(l)
r = y.ndim * [slice(None)]
r[i] = slice(n_2, None)
r = tuple(r)
y = _concatenate([y[r], y[l]], axis=i)
return y
@wraps(np.fft.fftshift)
def fftshift(x, axes=None):
return _fftshift_helper(x, axes=axes, inverse=False)
@wraps(np.fft.ifftshift)
def ifftshift(x, axes=None):
return _fftshift_helper(x, axes=axes, inverse=True)
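# Illustrative note (added): for a 1-D array of length 5, fftshift maps
# [0, 1, 2, 3, 4] -> [3, 4, 0, 1, 2] and ifftshift undoes it, matching
# np.fft.fftshift and np.fft.ifftshift.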
| {
"repo_name": "mraspaud/dask",
"path": "dask/array/fft.py",
"copies": "1",
"size": "7358",
"license": "bsd-3-clause",
"hash": -2483915042790306000,
"line_mean": 24.5486111111,
"line_max": 80,
"alpha_frac": 0.5453927698,
"autogenerated": false,
"ratio": 3.15929583512237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9201911439529464,
"avg_score": 0.0005554330785812266,
"num_lines": 288
} |
from __future__ import absolute_import, division, print_function
import collections
from operator import itemgetter
import pytest
from appr.exception import (ChannelNotFound, Forbidden, InvalidRelease, PackageAlreadyExists,
PackageNotFound, PackageReleaseNotFound, InvalidUsage)
def convert_utf8(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert_utf8, data))
else:
return data
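# Added note (Python 2 semantics): e.g. convert_utf8({u'a': [u'b', u'c']})
# returns {'a': ['b', 'c']}, recursively coercing unicode text to byte strings.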
@pytest.mark.models
class TestModels:
from appr.models.kv.filesystem.db import ApprDB
DB_CLASS = ApprDB
@pytest.fixture()
def db(self):
return self.DB_CLASS
def test_package_init(self, db_class):
p = db_class.Package("titi/toot", '1.2.0', 'helm')
assert p.name == "toot"
assert p.namespace == "titi"
assert p.release == "1.2.0"
assert p.package == "titi/toot"
def test_package_set_blob(self, db_class, package_b64blob):
p = db_class.Package("titi/toot", '1.2.0', 'helm', None)
assert p._data is None
p.blob = db_class.Blob("titi/rocketchat", package_b64blob)
assert p.blob.b64blob == package_b64blob
assert p.data['content'][
'digest'] == "8dc8a2c479f770fd710e0dddaa7af0e6b495bc6375cdf2b4a649f4c2b03e27d5"
assert p.blob.digest == "8dc8a2c479f770fd710e0dddaa7af0e6b495bc6375cdf2b4a649f4c2b03e27d5"
@pytest.mark.integration
def test_db_restore(self, newdb, dbdata1):
assert newdb.Package.dump_all(newdb.Blob) == []
assert newdb.Channel.dump_all(newdb.Blob) == []
newdb.restore_backup(dbdata1)
dump = newdb.Package.dump_all(newdb.Blob)
sorting = itemgetter('package', "mediaType", "release")
dump = convert_utf8(dump)
expected_packages = convert_utf8(dbdata1['packages'])
for x in xrange(len(expected_packages)):
dump[x].pop("created_at")
expected_packages[x].pop("created_at")
assert sorted(dump, key=sorting) == sorted(expected_packages, key=sorting)
assert sorted(newdb.Channel.dump_all(newdb.Blob)) == sorted(dbdata1['channels'])
@pytest.mark.integration
def test_get_default_package_media_type(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", 'default', 'kpm')
assert p.package == "titi/rocketchat"
@pytest.mark.integration
def test_get_default_package(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", 'default', '-')
assert p.package == "titi/rocketchat"
assert p.media_type == "kpm"
@pytest.mark.integration
def test_get_package_release_query(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", ">1.2", 'kpm')
assert p.package == "titi/rocketchat"
assert p.release == "2.0.1"
assert p.digest == "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80"
assert p.media_type == "kpm"
@pytest.mark.integration
def test_get_package_detect_format(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", ">1.2", '-')
assert p.package == "titi/rocketchat"
assert p.release == "2.0.1"
assert p.media_type == "kpm"
assert p.digest == "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80"
@pytest.mark.integration
def test_get_blob(self, db_with_data1):
blob = db_with_data1.Blob.get(
"titi/rocketchat", "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80")
assert blob.digest == "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80"
assert blob.size == 778L
@pytest.mark.integration
def test_get_package_absent_manifest(self, db_with_data1):
with pytest.raises(PackageNotFound):
db_with_data1.Package.get("titi/rocketchat", ">1.2", 'bad')
@pytest.mark.integration
def test_get_package_absent_release(self, db_with_data1):
with pytest.raises(PackageReleaseNotFound):
db_with_data1.Package.get("titi/rocketchat", "2.0.2", 'kpm')
@pytest.mark.integration
def test_get_package_bad_release_query(self, db_with_data1):
with pytest.raises(InvalidRelease):
db_with_data1.Package.get("titi/rocketchat", "abc", 'kpm')
@pytest.mark.integration
def test_pull_package_absent_release(self, db_with_data1):
with pytest.raises(PackageReleaseNotFound):
p = db_with_data1.Package("titi/rocketchat", '4.3.0', 'kpm')
p.pull()
@pytest.mark.integration
def test_save_package_bad_release(self, newdb):
assert newdb.Package.all() == []
with pytest.raises(InvalidRelease):
p = newdb.Package("titi/rocketchat", 'abc', 'kpm')
p.save()
@pytest.mark.integration
def test_save_package(self, newdb, package_b64blob):
assert newdb.Package.all() == []
blob = newdb.Blob("titi/rocketchat", package_b64blob)
p = newdb.Package("titi/rocketchat", '2.3.4', 'kpm', blob)
p.save()
fetchpackage = newdb.Package.get('titi/rocketchat', '2.3.4', 'kpm')
assert fetchpackage.digest == "8dc8a2c479f770fd710e0dddaa7af0e6b495bc6375cdf2b4a649f4c2b03e27d5"
assert newdb.Blob.get('titi/rocketchat',
fetchpackage.digest).size == fetchpackage.blob_size
@pytest.mark.integration
def test_save_package_exists(self, newdb, package_b64blob):
assert newdb.Package.all() == []
blob = newdb.Blob("titi/rocketchat", package_b64blob)
p = newdb.Package("titi/rocketchat", '2.3.4', 'kpm', blob)
p.save()
assert newdb.Package.get("titi/rocketchat", "2.3.4", "kpm") is not None
with pytest.raises(PackageAlreadyExists):
p.save()
@pytest.mark.integration
def test_save_package_exists_force(self, newdb, package_b64blob):
assert newdb.Package.all() == []
blob = newdb.Blob("titi/rocketchat", package_b64blob)
p = newdb.Package("titi/rocketchat", '2.3.4', 'kpm', blob)
p.save()
p.save(True)
# @TODO store deleted releases
@pytest.mark.integration
@pytest.mark.xfail
def test_save_package_deleted(self, newdb, package_b64blob):
assert newdb.Package.all() == []
blob = newdb.Blob("titi/rocketchat", package_b64blob)
p = newdb.Package("titi/rocketchat", '2.3.4', 'kpm', blob)
p.save()
newdb.Package.delete("titi/rocketchat", '2.3.4', 'kpm')
with pytest.raises(PackageAlreadyExists):
p.save()
@pytest.mark.integration
def test_list_package_releases(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", "default", "kpm")
assert sorted(p.releases()) == sorted(['0.0.1', '1.0.1', '2.0.1'])
@pytest.mark.integration
def test_list_package_media_types(self, db_with_data1):
assert sorted(db_with_data1.Package.manifests("titi/rocketchat", "0.0.1")) == [
'helm', 'kpm']
@pytest.mark.integration
def test_get_package_multi_media_type(self, db_with_data1):
with pytest.raises(InvalidUsage):
db_with_data1.Package.get("titi/rocketchat", "0.0.1", "-")
@pytest.mark.integration
def test_list_package_channels(self, db_with_data1):
p = db_with_data1.Package.get("titi/rocketchat", '2.0.1', "kpm")
assert p.channels(db_with_data1.Channel) == ['stable']
p2 = db_with_data1.Package.get("titi/rocketchat", '1.0.1', "kpm")
assert sorted(p2.channels(db_with_data1.Channel)) == sorted(['dev'])
assert sorted(p2.channels(db_with_data1.Channel, iscurrent=False)) == sorted([
'dev', 'stable'])
p3 = db_with_data1.Package.get("titi/rocketchat", '0.0.1', "kpm")
assert sorted(p3.channels(db_with_data1.Channel)) == sorted([])
    def test_forbidden_db_reset(self, db_class):
with pytest.raises(Forbidden):
db_class.reset_db()
@pytest.mark.integration
def test_all_channels(self, db_with_data1):
channels = [c.name for c in db_with_data1.Channel.all('titi/rocketchat')]
assert sorted(channels) == sorted([u'dev', u'stable'])
@pytest.mark.integration
def test_all_channels_absent_package(self, db_with_data1):
with pytest.raises(PackageNotFound):
db_with_data1.Channel.all('titi/doesntexists')
@pytest.mark.integration
def test_all_channels_no_data(self, newdb):
with pytest.raises(PackageNotFound):
newdb.Channel.all('titi/doesntexists')
@pytest.mark.integration
def test_channel_releases(self, db_with_data1):
channel = db_with_data1.Channel.get('stable', 'titi/rocketchat')
assert sorted(channel.releases()) == sorted([u'1.0.1', u'2.0.1'])
@pytest.mark.integration
def test_channel_no_releases(self, db_with_data1):
channel = db_with_data1.Channel('default', 'titi/rocketchat')
with pytest.raises(ChannelNotFound):
channel.releases()
@pytest.mark.integration
def test_channel_add_release(self, db_with_data1):
channel = db_with_data1.Channel('default', 'titi/rocketchat')
package = db_with_data1.Package.get('titi/rocketchat', '1.0.1', "kpm")
with pytest.raises(ChannelNotFound):
channel.releases()
assert 'default' not in package.channels(db_with_data1.Channel)
channel.add_release('1.0.1', db_with_data1.Package)
assert sorted(channel.releases()) == sorted(['1.0.1'])
assert "default" in package.channels(db_with_data1.Channel)
@pytest.mark.integration
def test_channel_add_release_new_channel(self, db_with_data1):
channel = db_with_data1.Channel('newone', 'titi/rocketchat')
assert channel.exists() is False
package = db_with_data1.Package.get('titi/rocketchat', '1.0.1', "kpm")
assert 'newone' not in package.channels(db_with_data1.Channel)
channel.add_release('1.0.1', db_with_data1.Package)
assert sorted(channel.releases()) == sorted(['1.0.1'])
assert "newone" in package.channels(db_with_data1.Channel)
assert channel.exists() is True
@pytest.mark.integration
def test_channel_delete_releases(self, db_with_data1):
channel = db_with_data1.Channel.get('stable', 'titi/rocketchat')
package = db_with_data1.Package.get('titi/rocketchat', '2.0.1', "kpm")
assert sorted(channel.releases()) == sorted([u'1.0.1', u'2.0.1'])
assert 'stable' in package.channels(db_with_data1.Channel)
assert channel.current == "2.0.1"
channel.remove_release('2.0.1')
assert sorted(channel.releases()) == sorted(['1.0.1'])
channel = db_with_data1.Channel.get('stable', 'titi/rocketchat')
assert channel.current is not None
assert "stable" not in package.channels(db_with_data1.Channel)
@pytest.mark.integration
def test_channel_delete_all_releases(self, db_with_data1):
channel = db_with_data1.Channel.get('dev', 'titi/rocketchat')
package = db_with_data1.Package.get('titi/rocketchat', '1.0.1', "kpm")
assert sorted(channel.releases()) == sorted([u'1.0.1'])
assert 'dev' in package.channels(db_with_data1.Channel)
assert channel.current == "1.0.1"
channel.remove_release('1.0.1')
with pytest.raises(ChannelNotFound):
channel = db_with_data1.Channel.get('dev', 'titi/rocketchat')
@pytest.mark.integration
def test_channel_delete_absent_releases(self, db_with_data1):
channel = db_with_data1.Channel('new', 'titi/rocketchat')
with pytest.raises(ChannelNotFound):
channel.remove_release('1.0.1')
@pytest.mark.integration
def test_channel_add_bad_releases(self, db_with_data1):
channel = db_with_data1.Channel('stable', 'titi/rocketchat')
db_with_data1.Package.get('titi/rocketchat', '1.0.1', "kpm")
with pytest.raises(InvalidRelease):
channel.add_release('1.a.1', db_with_data1.Package)
@pytest.mark.integration
def test_channel_add_absent_releases(self, db_with_data1):
channel = db_with_data1.Channel('stable', 'titi/rocketchat')
db_with_data1.Package.get('titi/rocketchat', '1.0.1', "kpm")
with pytest.raises(PackageReleaseNotFound):
channel.add_release('1.4.1', db_with_data1.Package)
@pytest.mark.integration
def test_create_channel_absent_package(self, db_with_data1):
channel = db_with_data1.Channel('newone', 'titi/doest')
with pytest.raises(PackageNotFound):
channel.save()
@pytest.mark.integration
def test_channel_current_release(self, db_with_data1):
channel = db_with_data1.Channel.get('stable', 'titi/rocketchat')
assert channel.current_release() == '2.0.1'
@pytest.mark.integration
def test_get_channel_absent(self, db_with_data1):
with pytest.raises(ChannelNotFound):
db_with_data1.Channel.get('stableh', 'titi/rocketchat')
# @todo better all test
@pytest.mark.integration
def test_list_all_package(self, db_with_data1):
result = sorted(db_with_data1.Package.all())
assert len(result) == 1
@pytest.mark.integration
def test_search_empty_package(self, db_with_data1):
assert db_with_data1.Package.search('fdsf') == []
@pytest.mark.integration
def test_search_package(self, db_with_data1):
assert db_with_data1.Package.search('rocket') == ['titi/rocketchat']
ApprTestModels = TestModels
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/tests/test_models.py",
"copies": "2",
"size": "13801",
"license": "apache-2.0",
"hash": 3880998322085061600,
"line_mean": 42.128125,
"line_max": 104,
"alpha_frac": 0.6461850591,
"autogenerated": false,
"ratio": 3.147320410490308,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47935054695903084,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import collections
import functools
import math
import maya.OpenMaya
from pymel.core import aimConstraint, addAttr, arclen, cluster, createNode, delete, duplicate, dt, group, hide, \
orientConstraint, parentConstraint, pointConstraint, PyNode, scaleConstraint, selected, upAxis, warning, xform
from ....add import simpleName
from .... import core
from .... import lib
from .. import log
from .. import controllerShape
from .. import node
ConstraintResults = collections.namedtuple( 'ConstraintResults', 'point orient' )
class EndOrient:
TRUE_ZERO = 'True_Zero' # Matches world but has true zero to return to bind
    JOINT = 'Joint' # Match the orient of the last joint (VERIFY this just means it matches the joint, no true zero)
TRUE_ZERO_FOOT = 'True_Zero_Foot' # Same as TRUE_ZERO but only in xz plane
WORLD = 'World'
@classmethod
def asChoices(cls):
choices = collections.OrderedDict()
choices[cls.TRUE_ZERO.replace('_', ' ')] = cls.TRUE_ZERO
choices[cls.JOINT.replace('_', ' ')] = cls.JOINT
choices[cls.TRUE_ZERO_FOOT.replace('_', ' ')] = cls.TRUE_ZERO_FOOT
choices[cls.WORLD] = cls.WORLD
return choices
def adds(*attributes):
'''
Marks a function with fossilDynamicAttrs to track the attributes made so
special sauce can be identified.
'''
def realDecorator(func):
setattr(func, 'fossilDynamicAttrs', attributes)
return func
return realDecorator
def defaultspec(defSpec, **additionalSpecs):
'''
Decorator to used to specify the default control building values.
ex:
    @defaultspec( {'shape': control.box, 'size': 10, 'color': 'blue 0.22'} )
def buildLeg( ... , controlSpec={})
...
control.build( 'FootControl', controlsSpec['main'] )
Or, for multiple controls:
    @defaultspec( {'shape': control.box, 'size': 10, 'color': 'blue'},
                  pv={'shape': control.sphere, 'size': 8, 'color': 'green'})
def buildLegIk( ... , controlSpec={})
...
control.build( 'FootControl', controlsSpec['main'] )
...
control.build( 'FootControl', controlsSpec['pv'] ) # Same keyword as was passed in to defaultspec
    The reason is that this allows for partial overriding: if a value isn't specified,
the default is used. This also saves from having a long default argument
list which varies from control to control.
If some aspect of a rig adds an additional control, it is trivial to add it
as a spec into the system.
.. todo::
        I might want to log spec errors in some better way to show them all at the end
'''
def realDecorator(func):
        # allSpecs is the alterable copy; the source spec dicts remain untouched.
allSpecs = { 'main': defSpec.copy() }
if 'visGroup' not in allSpecs['main']:
allSpecs['main']['visGroup'] = ''
if 'align' not in allSpecs['main']:
allSpecs['main']['align'] = 'y'
for specName, spec in additionalSpecs.items():
allSpecs[specName] = spec.copy()
if 'visGroup' not in allSpecs[specName]:
allSpecs[specName]['visGroup'] = ''
if 'align' not in allSpecs[specName]:
allSpecs[specName]['align'] = 'y'
def newFunc(*args, **kwargs):
# Make a copy of the spec that can be modified
tempSpec = {}
for specName, spec in allSpecs.items():
tempSpec[specName] = spec.copy()
# Override default controlSpecs with whatever the user provides
if 'controlSpec' in kwargs:
# Apply any overridden spec data
for specName, spec in kwargs['controlSpec'].items():
if specName in tempSpec:
tempSpec[specName].update( spec )
else:
warning( 'Ignoring unused spec {0}'.format(specName) )
kwargs['controlSpec'] = tempSpec
#argspec = inspect.getargspec(func)
#print argspec
#print args, kwargs
res = func(*args, **kwargs)
# Now that all the controls are made, we can safely apply the
# visGroup, since they apply to the '_space' group, not the actual
# control which is connected to the ik/fk switch attr
if tempSpec['main']['visGroup']:
lib.sharedShape.connect(res[0], (tempSpec['main']['visGroup'], 1) )
subControls = res[0].subControl.items()
if subControls:
# If there is one spec and sub controls, it is a chain so apply the same visgroup
if len(tempSpec) == 1 and tempSpec['main']['visGroup']:
for name, ctrl in subControls:
lib.sharedShape.connect(ctrl, (tempSpec['main']['visGroup'], 1) )
# If there are 2 specs, the non-main is the repeating one
elif len(tempSpec) == 2:
                    specName = [name for name in tempSpec if name != 'main'][0]
visGroup = tempSpec['main']['visGroup']
if visGroup:
for name, ctrl in subControls:
lib.sharedShape.connect(ctrl, (visGroup, 1) )
# Finally, each additional spec should match a sub control
else:
for specName in tempSpec:
if specName == 'main':
continue
if tempSpec[specName]['visGroup']:
try: # &&& Eventually this needs to not ignore errors
lib.sharedShape.connect(
res[0].subControl[specName],
(tempSpec[specName]['visGroup'], 1)
)
except Exception:
pass
return res
# Store the default spec so it's easy to access for other things.
setattr( newFunc, '__defaultSpec__', allSpecs )
functools.update_wrapper( newFunc, func )
return newFunc
return realDecorator
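# Illustrative sketch (added; not part of the original module): the partial
# override behaviour of defaultspec, reduced to plain dictionaries so it can be
# read without the control-building machinery. All names here are hypothetical.
def _example_spec_merge():
    defaults = {'main': {'shape': 'box', 'size': 10, 'visGroup': '', 'align': 'y'}}
    userSpec = {'main': {'size': 4}}
    merged = {name: dict(spec, **userSpec.get(name, {})) for name, spec in defaults.items()}
    return merged  # {'main': {'shape': 'box', 'size': 4, 'visGroup': '', 'align': 'y'}}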
# Chain stuff -----------------------------------------------------------------
def getChain(start, end):
'''
Returns a list of joints from start to end or an empty list if end isn't
descended from start.
'''
joints = []
current = end
while current and current != start:
joints.append( current )
current = current.getParent()
# If we never hit the start, start and end are unrelated.
if current != start:
return []
joints.append( start )
joints.reverse()
return joints
def chainLength(joints):
return abs(sum( [j.tx.get() for j in joints[1:]] ))
def dupChain(start, end, nameFormat='{0}_dup'):
'''
    Creates a duplicate chain, pruned of all branches and children. Can handle
    the same joint being both start and end.
:param string nameFormat: The str.format used on the duped chain
'''
chain = getChain(start, end)
assert chain, '{0} and {1} are not in the same hierarchy, dupChain() failed'.format(start, end)
dup = duplicate(start)[0]
if start != end:
child = findChild( dup, simpleName(end) )
assert child, 'dupChain failed to find duped child {0} in {1}'.format(end, start)
prune( dup, child )
else:
child = dup
dupedChain = getChain( dup, child )
ends = dupedChain[-1].getChildren(type='transform')
if ends:
delete(ends)
for src, d in zip(chain, dupedChain):
dupName = simpleName(src, nameFormat)
d.rename(dupName)
return dupedChain
def chainMeasure(joints):
n = createNode('plusMinusAverage')
n.operation.set(1)
for i, j in enumerate(joints[1:]):
j.tx >> n.input1D[i]
cl = chainLength(joints)
if n.output1D.get() < 0:
cl *= -1
return core.math.divide( n.output1D, cl)
def findChild(chain, target):
'''
Given a joint chain, find the child of the target name
'''
for child in chain.listRelatives(type='joint'):
if child.name().rsplit('|')[-1] == target:
return child
for child in chain.listRelatives(type='joint'):
t = findChild(child, target)
if t:
return t
return None
def prune(start, end, trimEnd=True):
'''
Cut the joint chain to just the start and end joints, no branching.
:param bool trimEnd: True by default, removing any children of `end`.
'''
p = end.getParent()
keep = end
if trimEnd:
ends = end.listRelatives(type='transform')
if ends:
delete(ends)
if not end.longName().startswith( start.longName() ):
raise Exception( "{0} is not a descendant of {1}".format( end, start) )
while True:
for child in p.listRelatives():
if child != keep:
delete(child)
keep = p
p = p.getParent()
if keep == start:
return
def constrainTo(constrainee, target, includeScale=False):
'''
Point, orient, optionally scale constrains the first to the second, returning
a list of the controlling plugs.
'''
o = orientConstraint( target, constrainee, mo=True )
p = pointConstraint( target, constrainee, mo=True )
if not includeScale:
return o.getWeightAliasList()[-1], p.getWeightAliasList()[-1]
else:
s = scaleConstraint( target, constrainee, mo=True )
return o.getWeightAliasList()[-1], p.getWeightAliasList()[-1], s.getWeightAliasList()[-1]
def constrainAtoB(chain, controlChain, mo=True):
'''
    Point/orient constrain the first chain to the second, driving all their
weights by the lead joint.
'''
points = []
orients = []
for _controller, orig in zip( controlChain, chain ):
points.append( pointConstraint( _controller, orig, mo=mo ).getWeightAliasList()[-1] )
orients.append( orientConstraint( _controller, orig, mo=mo ).getWeightAliasList()[-1] )
for p in points[1:]:
points[0] >> p
for o in orients[1:]:
orients[0] >> o
return ConstraintResults(points[0], orients[0])
# True zero stuff -------------------------------------------------------------
def storeTrueZero(obj, rot):
'''
    True zero makes the control's zero state world aligned, so we have to store
what the "neutral" pose is.
'''
obj.addAttr( 'trueZero', at='double3' )
obj.addAttr( 'trueZeroX', at='double', p='trueZero' )
obj.addAttr( 'trueZeroY', at='double', p='trueZero' )
obj.addAttr( 'trueZeroZ', at='double', p='trueZero' )
obj.trueZero.set( channelBox=True )
obj.trueZeroX.set( channelBox=abs(rot[0]) > 0.00000000001 )
obj.trueZeroY.set( channelBox=abs(rot[1]) > 0.00000000001 )
obj.trueZeroZ.set( channelBox=abs(rot[2]) > 0.00000000001 )
obj.trueZero.set( rot )
obj.trueZero.lock()
obj.trueZeroX.lock()
obj.trueZeroY.lock()
obj.trueZeroZ.lock()
def trueZeroSetup(rotationTarget, ctrl):
'''
Stores the closest world orient of the rotation target on the given control.
.. todo::
Use this function in all the places where matchOrient exists.
'''
rot = determineClosestWorldOrient(rotationTarget)
ctrl.r.set( rot )
storeTrueZero(ctrl, rot)
def trueZeroFloorPlane(rotationTarget, ctrl):
"""
trans = xform(rotationTarget, q=True, ws=True, t=True)
# Make a unit X vector (assume left side is +x, right is -x)
if trans[0] >= 0:
tx = dt.Matrix([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0]])
else:
tx = dt.Matrix([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0, 1.0]])
# Move out from the rotator by the unit X vector (in world space)
altered = tx * rotationTarget.worldMatrix.get()
# Get the X and Z world position of the new point
alteredX = altered[3][0]
alteredZ = altered[3][2]
# Find the difference in X and Z world positions to calc Y
deltaX = alteredX - trans[0]
deltaZ = alteredZ - trans[2]
rad = math.atan2(deltaX, deltaZ)
degrees = math.degrees(rad)
"""
degrees = trueWorldFloorAngle(rotationTarget)
ctrl.ry.set(degrees)
storeTrueZero(ctrl, [0, degrees, 0])
def trueWorldFloorAngle(obj):
'''
Only true for Y up, returns the smallest Y axis worldspace angle needed to
rotate to be axis aligned.
To rotate the object run `rotate([0, a, 0], r=1, ws=1, fo=True)`
'''
m = xform(obj, q=True, ws=True, m=True)
# The valid axes to check
rows = (0, 1, 2)
cols = (2,)
hirow = rows[0]
hicol = cols[0]
highest = m[ hirow * 4 + hicol ]
for col in cols:
for row in rows:
if abs(m[ row * 4 + col]) > abs(highest):
highest = m[ row * 4 + col]
hirow = row
hicol = col
#print 'col: {} row: {} h: {}'.format(hicol, hirow, highest)
# The `col` determines the world axis
if hicol == 0:
worldAxis = dt.Vector([1.0, 0, 0])
elif hicol == 1:
worldAxis = dt.Vector([0, 1.0, 0])
elif hicol == 2:
worldAxis = dt.Vector([0, 0, 1.0])
    # If the highest is negative, flip it; i.e. a local axis closely matched -z
if highest < 0:
worldAxis *= -1.0
    # The `row` determines the local axis
if hirow == 0:
localAxis = dt.Vector(m[0], 0, m[2]).normal()
elif hirow == 1:
localAxis = dt.Vector(m[4], 0, m[6]).normal()
elif hirow == 2:
localAxis = dt.Vector(m[8], 0, m[10]).normal()
a = math.degrees(localAxis.angle(worldAxis))
    # If the cross is negative, flip the angle
if localAxis.cross(worldAxis).y < 0:
a *= -1
return a
# Stretchiness ----------------------------------------------------------------
def recordFloat(obj, name, val):
if not obj.hasAttr(name):
obj.addAttr( name, at='double' )
obj.attr(name).set(val)
def saveRestLength(j, jointAxis='x'):
recordFloat(j, 'restLength', j.attr('t' + jointAxis).get() )
def makeStretchySpline(controller, ik, stretchDefault=1):
start, chain, jointAxis, switcher = _makeStretchyPrep( controller, ik, stretchDefault )
crv = ik.inCurve.listConnections()[0]
length = arclen(crv, ch=1).arcLength
lengthMax = arclen(crv, ch=1).arcLength.get()
# Spline squashes and stretches
multiplier = core.math.divide( length, lengthMax )
jointLenMultiplier = switcher.output
multiplier >> switcher.input[1]
for i, j in enumerate(chain[1:], 1):
#util.recordFloat(j, 'restLength', j.attr('t' + jointAxis).get() )
saveRestLength(j, jointAxis)
core.math.multiply( jointLenMultiplier, j.restLength) >> j.attr('t' + jointAxis)
return controller.attr('stretch'), jointLenMultiplier
def _makeStretchyPrep(controller, ik, stretchDefault=1):
start = ik.startJoint.listConnections()[0]
end = ik.endEffector.listConnections()[0].tz.listConnections()[0]
chain = getChain( start, end )
jointAxis = identifyAxis( end )
switcher = createNode('blendTwoAttr', n='stretchSlider')
switcher.input[0].set(1)
drive(controller, 'stretch', switcher.attributesBlender, minVal=0, maxVal=1, dv=max(min(stretchDefault, 1), 0) )
controller.stretch.set(1)
controller.addAttr('modAmount', at='double', k=False)
controller.modAmount.set(cb=True)
chainMeasure(chain) >> controller.modAmount
return start, chain, jointAxis, switcher
def makeStretchyNonSpline(controller, ik, stretchDefault=1):
start, chain, jointAxis, switcher = _makeStretchyPrep( controller, ik, stretchDefault )
dist, grp = core.dagObj.measure(start, ik)
grp.setParent( controller )
dist.setParent( ik.getParent() )
length = dist.distance
lengthMax = chainLength(chain)
# Regular IK only stretches
# ratio = (abs distance between start and end) / (length of chain)
ratio = core.math.divide( length, lengthMax )
# multiplier is either 1 or a number greater than one needed for the chain to reach the end.
multiplier = core.math.condition( ratio, '>', 1.0, true=ratio, false=1 )
controller.addAttr( 'length', at='double', min=-10.0, dv=0.0, max=10.0, k=True )
'''
lengthMod is the below formula:
if controller.length >= 0:
controller.length/10.0 + 1.0 # 1.0 to 2.0 double the length of the limb
else:
controller.length/20.0 + 1.0 # .5 to 1.0 halve the length of the limb
'''
lengthMod = core.math.add(
core.math.divide(
controller.length,
core.math.condition(controller.length, '>=', 0, 10.0, 20.0)
),
1.0
)
jointLenMultiplier = core.math.multiply(switcher.output, lengthMod)
multiplier >> switcher.input[1]
for i, j in enumerate(chain[1:], 1):
saveRestLength(j, jointAxis)
#util.recordFloat(j, 'restLength', j.attr('t' + jointAxis).get() )
        # Make an attribute where -10 to 10 maps to multiplying the restLength by 0 to 2
attrName = 'segLen' + str(i)
controller.addAttr( attrName, at='double', k=True, min=-10, max=10 )
normalizedMod = core.math.add(core.math.divide( controller.attr(attrName), 10), 1)
"j.attr('t' + jointAxis) = lockSwitcher.output = jointLenMultiplier * normalizedMod * j.restLength"
# As of 2/9/2019 it looks to be fine to make this even if it's not used by the ik to lock the elbow (like in dogleg)
lockSwitcher = createNode('blendTwoAttr', n='lockSwitcher')
core.math.multiply(
jointLenMultiplier,
core.math.multiply( normalizedMod, j.restLength)
) >> lockSwitcher.input[0] # >> j.attr('t' + jointAxis)
lockSwitcher.output >> j.attr('t' + jointAxis)
return controller.attr('stretch'), jointLenMultiplier
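# Illustrative sketch (added; not part of the original module): the lengthMod
# mapping documented in the comment inside makeStretchyNonSpline, written as
# plain Python. A length attr of -10..10 scales the limb by 0.5..2.0.
def _example_length_mod(length):
    if length >= 0:
        return length / 10.0 + 1.0   # 0..10 maps to 1.0..2.0
    return length / 20.0 + 1.0       # -10..0 maps to 0.5..1.0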
# IK / Spline stuff -----------------------
def advancedTwist(start, end, baseCtrl, endCtrl, ik):
# Setup advanced twist
startAxis = duplicate( start, po=True )[0]
startAxis.rename( 'startAxis' )
startAxis.setParent( baseCtrl )
core.dagObj.lockTrans(core.dagObj.lockRot(core.dagObj.lockScale(startAxis)))
endAxis = duplicate( start, po=True )[0]
endAxis.rename( 'endAxis' )
endAxis.setParent( endCtrl )
endAxis.t.set(0, 0, 0)
core.dagObj.lockTrans(core.dagObj.lockRot(core.dagObj.lockScale(endAxis)))
hide(startAxis, endAxis)
ik.dTwistControlEnable.set(1)
ik.dWorldUpType.set(4)
startAxis.worldMatrix[0] >> ik.dWorldUpMatrix
endAxis.worldMatrix[0] >> ik.dWorldUpMatrixEnd
def midAimer(start, end, midCtrl, name='aimer', upVector=None):
'''
    Creates an object point constrained to two others, aiming at the second. Up
vector defaults to the control's Y.
'''
aimer = group(em=True, name=name)
#aimer.setParent(container)
#aimer = polyCone(axis=[1, 0, 0])[0]
core.dagObj.moveTo(aimer, midCtrl)
pointConstraint(end, start, aimer, mo=True)
aimV = dt.Vector(xform(end, q=True, ws=True, t=True)) - dt.Vector( xform(aimer, q=1, ws=1, t=1) )
aimV.normalize()
if upVector:
midCtrlYUp = upVector
else:
temp = xform(midCtrl, q=True, ws=True, m=True)
midCtrlYUp = dt.Vector( temp[4:7] )
"""
# Generally the X axis is a good default up since things are normally on that plane
if abs(aimV[0]) < 0.0001 or min([abs(v) for v in aimV]) == abs(aimV[0]):
upV = dt.Vector([-1, 0, 0])
forwardV = aimV.cross(upV)
recalcUp = forwardV.cross(aimV)
# Reference
#xrow = aimV
#yrow = recalcUp
#zrow = forwardV
midCtrlYUp = recalcUp
print( 'midCtrlYUp', midCtrlYUp )
else:
# Choose Y up as the up (hopefully this works)
if abs(aimV[1]) < abs(aimV[0]) and abs(aimV[1]) < abs(aimV[2]):
upV = dt.Vector([0, 1, 0])
forwardV = aimV.cross(upV)
recalcUp = forwardV.cross(aimV)
# Reference
#xrow = aimV
#yrow = recalcUp
#zrow = forwardV
midCtrlYUp = recalcUp
pass
#
"""
# Determine which axis of the end is closest to the midControl's Y axis.
endMatrix = xform(end, q=True, ws=True, m=True)
#midMatrix = xform(aimer, q=True, ws=True, m=True)
#midCtrlYUp = dt.Vector(midMatrix[4:7])
choices = [
(endMatrix[:3], [1, 0, 0]),
([-x for x in endMatrix[:3]], [-1, 0, 0]),
(endMatrix[4:7], [0, 1, 0]),
([-x for x in endMatrix[4:7]], [0, -1, 0]),
(endMatrix[8:11], [0, 0, -1]),
([-x for x in endMatrix[8:11]], [0, 0, 1]),
]
# Seed with the first choice as the best...
low = midCtrlYUp.angle(dt.Vector(choices[0][0]))
axis = choices[0][1]
# ... and see if any others are better
for vector, destAxis in choices[1:]:
vector = dt.Vector(vector) # Just passing 3 numbers sometimes gets a math error.
if midCtrlYUp.angle(vector) < low:
low = midCtrlYUp.angle(vector)
axis = destAxis
aimConstraint( end, aimer, wut='objectrotation', aim=[1, 0, 0], wuo=end, upVector=[0, 1, 0], wu=axis, mo=False)
return aimer
_45_DEGREES = math.radians(45)
def slerp(start, end, percent):
dot = start.dot(end)
theta = math.acos(dot) * percent # angle between * percent
relativeVec = end - start * dot
relativeVec.normalize()
return ((start * math.cos(theta)) + (relativeVec * math.sin(theta)))
def calcOutVector(start, middle, end):
'''
    Given 3 joints (or dt.Vectors), start, middle and end, determine the vector pointing directly away along the xz plane.
.. todo::
Gracefully handle if the ik is on the xz plane already.
'''
s = dt.Vector( xform(start, q=1, ws=1, t=1) ) if isinstance( start, PyNode) else start
m = dt.Vector( xform(middle, q=1, ws=1, t=1) ) if isinstance( middle, PyNode) else middle
e = dt.Vector( xform(end, q=1, ws=1, t=1) ) if isinstance( end, PyNode) else end
up = s - e
if upAxis(q=True, ax=True) == 'y':
kneeScale = ( m.y - e.y ) / up.y if up.y else 0.0
else:
kneeScale = ( m.z - e.z ) / up.z if up.z else 0.0
modifiedUp = kneeScale * up
newPos = modifiedUp + e
outFromKnee = m - newPos
outFromKnee.normalize()
# If we are halfway to the x/z plane, lerp between the old formula and a new one
testUp = dt.Vector(up)
if testUp.y < 0:
testUp.y *= -1.0
angleToVertical = dt.Vector( 0, 1, 0 ).angle( testUp )
if angleToVertical > _45_DEGREES:
# Calculate a point perpendicular to the line created by the start and end
# going through the middle
theta = up.angle( m - e )
distToMidpoint = math.cos(theta) * (m - e).length()
midPoint = distToMidpoint * up.normal() + e
altOutFromKnee = m - midPoint
altOutFromKnee.normalize()
# lerp between the vectors
percent = (angleToVertical - _45_DEGREES) / _45_DEGREES # 45 to up axis will favor old, on y axis favors new
outFromKnee = slerp(outFromKnee, altOutFromKnee, percent)
angleBetween = (m - s).angle( e - m )
log.TooStraight.check(angleBetween)
outFromKnee.normalize()
return outFromKnee
# Ik/Fk Switching -----------------------
def getChainFromIk(ikHandle):
'''
Given an ikHandle, return a chain of the joints affected by it.
'''
start = ikHandle.startJoint.listConnections()[0]
endEffector = ikHandle.endEffector.listConnections()[0]
end = endEffector.tx.listConnections()[0]
chain = getChain(start, end)
return chain
def getConstraineeChain(chain):
'''
If the given chain has another rotate constrained to it, return it
'''
boundJoints = []
for j in chain:
temp = core.constraints.getOrientConstrainee(j)
if temp:
boundJoints.append(temp)
else:
break
return boundJoints
def createMatcher(ctrl, target):
'''
Creates an object that follows target, based on ctrl so ctrl can match it
easily.
'''
matcher = duplicate(ctrl, po=True)[0]
parentConstraint( target, matcher, mo=True )
matcher.rename( ctrl.name() + '_matcher' )
hide(matcher)
if not ctrl.hasAttr( 'matcher' ):
ctrl.addAttr('matcher', at='message')
matcher.message >> ctrl.matcher
if matcher.hasAttr('fossilCtrlType'):
matcher.deleteAttr( 'fossilCtrlType' )
return matcher
def getMatcher(ctrl):
try:
matcher = ctrl.matcher.listConnections()[0]
return matcher
except Exception:
warning('{0} does not have a matcher setup'.format(ctrl))
def alignToMatcher(ctrl):
try:
matcher = getMatcher(ctrl)
xform( ctrl, ws=True, t=xform(matcher, q=True, ws=True, t=True) )
xform( ctrl, ws=True, ro=xform(matcher, q=True, ws=True, ro=True) )
except Exception:
warning('{0} does not have a matcher setup'.format(ctrl))
def angleBetween( a, mid, c ):
# Give 3 points, return the angle and axis between the vectors
aPos = dt.Vector(xform(a, q=True, ws=True, t=True))
midPos = dt.Vector(xform(mid, q=True, ws=True, t=True))
cPos = dt.Vector(xform(c, q=True, ws=True, t=True))
aLine = midPos - aPos
bLine = midPos - cPos
aLine.normalize()
bLine.normalize()
axis = aLine.cross(bLine)
if axis.length() > 0.01:
return math.degrees(math.acos(aLine.dot(bLine))), axis
else:
return 0, axis
def worldInfo(obj):
return [xform(obj, q=True, ws=True, t=True), xform(obj, q=True, ws=True, ro=True)]
def applyWorldInfo(obj, info):
xform(obj, ws=True, t=info[0])
xform(obj, ws=True, ro=info[1])
# -----------------------
def parentGroup(target):
'''
Returns a group that is constrained to the parent of the target.
Used to allow control hierarchies to live elsewhere.
.. todo::
Get rid of parentProxy, which is dumb
'''
name = simpleName(target, '{0}_Proxy' )
grp = group( em=True, name=name )
# Don't constrain top level nodes since they need to follow main, not b_Root
if target.getParent() != node.getTrueRoot():
parentConstraint( target.getParent(), grp, mo=False )
return grp
def trimName(jnt):
'''
    Given a joint, return its simple name without b_ or rig_ if those prefixes exist.
'''
name = simpleName(jnt)
if name.startswith( 'b_' ):
return name[2:]
return name
def drive(control, attr, driven, minVal=None, maxVal=None, asInt=False, dv=None, flipped=False):
'''
Add the attr to the control and feed it into driven.
'''
attrType = 'short' if asInt else 'double'
if not control.hasAttr( attr ):
control.addAttr( attr, at=attrType, k=True )
if minVal is not None:
control.attr( attr ).setMin(minVal)
if maxVal is not None:
control.attr( attr ).setMax(maxVal)
if dv is not None:
defaultVal = dv
if maxVal is not None:
defaultVal = min(defaultVal, maxVal)
if minVal is not None:
defaultVal = max(defaultVal, minVal)
addAttr(control.attr(attr), e=True, dv=dv)
if flipped:
core.math.multiply(control.attr(attr), -1) >> driven
else:
control.attr(attr) >> driven
return control.attr(attr)
def shortestAxis(srcAngle):
angle = abs(srcAngle) % 90
if angle >= 89.99999: # Due to float error, allow for some negligible slop to align the axis
angle -= 90
return math.copysign(angle, srcAngle)
def determineClosestWorldOrient(obj):
'''
Given an object, returns the shortest rotation that aligns the object with
the world. This is used to allow IK elements to have world alignment but
easily return to the bind pose.
'''
''' # This is essentially a math version of the following:
x = spaceLocator()
y = spaceLocator()
core.dagObj.moveTo( x, obj )
core.dagObj.moveTo( y, obj )
x.tx.set( 1 + x.tx.get() )
y.ty.set( 1 + y.ty.get() )
x.setParent(obj)
y.setParent(obj)
def zeroSmaller(loc):
vals = [abs(v) for v in loc.t.get() ]
        largestVal = max(vals)
        index = vals.index(largestVal)
for i, attr in enumerate('xyz'):
if i == index:
continue
loc.attr( 't' + attr ).set(0)
zeroSmaller( x )
zeroSmaller( y )
ref = spaceLocator()
core.dagObj.moveTo( ref, obj )
aimConstraint( x, ref, wut='object', wuo=y )
rot = ref.r.get()
delete( x, y, ref )
return rot
'''
# Make 2 world spaced points one unit along x and y
x = dt.Matrix( [ (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (1, 0, 0, 0) ] )
y = dt.Matrix( [ (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0) ] )
#z = dt.Matrix( [ (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 1, 0,) ] )
world = obj.worldMatrix.get()
inv = world.inverse()
# Find the local matrices respective of the obj
localX = x * inv
localY = y * inv
# For X, zero out the two smaller axes for each, ex t=.2, .3, .8 -> t=0, 0, .8
def useX(matrix):
return dt.Matrix( [matrix[0], matrix[1], matrix[2], [matrix[3][0], 0, 0, matrix[3][3]]] )
def useY(matrix):
return dt.Matrix( [matrix[0], matrix[1], matrix[2], [0, matrix[3][1], 0, matrix[3][3]]] )
def useZ(matrix):
return dt.Matrix( [matrix[0], matrix[1], matrix[2], [0, 0, matrix[3][2], matrix[3][3]]] )
xUsed, yUsed, zUsed = [False] * 3
if abs(localX[3][0]) > abs(localX[3][1]) and abs(localX[3][0]) > abs(localX[3][2]):
localX = useX(localX)
xUsed = True
elif abs(localX[3][1]) > abs(localX[3][0]) and abs(localX[3][1]) > abs(localX[3][2]):
localX = useY(localX)
yUsed = True
else:
localX = useZ(localX)
zUsed = True
# Do the same for Y
if xUsed:
if abs(localY[3][1]) > abs(localY[3][2]):
localY = useY(localY)
yUsed = True
else:
localY = useZ(localY)
zUsed = True
elif yUsed:
if abs(localY[3][0]) > abs(localY[3][2]):
localY = useX(localY)
xUsed = True
else:
localY = useZ(localY)
zUsed = True
elif zUsed:
if abs(localY[3][0]) > abs(localY[3][1]):
localY = useX(localX)
xUsed = True
else:
localY = useY(localY)
yUsed = True
# Find the 'world' (but treating the obj's pos as the origin) positions.
worldX = localX * world
worldY = localY * world
    # Convert this into a rotation matrix by mimicking an aim constraint
x = dt.Vector(worldX[-1][:-1])
y = dt.Vector(worldY[-1][:-1])
x.normalize()
y.normalize()
z = x.cross(y)
y = z.cross(x)
msutil = maya.OpenMaya.MScriptUtil()
mat = maya.OpenMaya.MMatrix()
msutil.createMatrixFromList([
x[0], x[1], x[2], 0.0,
y[0], y[1], y[2], 0.0,
z[0], z[1], z[2], 0.0,
0.0, 0.0, 0.0, 1.0
], mat) # noqa e123
rot = maya.OpenMaya.MEulerRotation.decompose(mat, maya.OpenMaya.MEulerRotation.kXYZ)
return dt.Vector(math.degrees( rot.x), math.degrees(rot.y), math.degrees(rot.z))
def identifyAxis(jnt, asVector=False):
'''
Determines the primary axis of the joint in relation to its parent,
returning 'x', 'y' or 'z' or the appropriate vector if asVector is True.
'''
jointAxis = max( zip( [abs(n) for n in jnt.t.get()], 'xyz' ) )[1]
if asVector:
jointAxis = {'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]}[jointAxis]
return jointAxis
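# Added note: for a translation of (0.1, -3.0, 0.2) the max(zip(...)) trick
# above picks 'y', since 3.0 is the largest absolute component.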
def driveConstraints(srcConstraintResult, destConstraintResult):
'''
Have the destConstraintResult controlled by the source.
Intended use is for chains where some joints, likely the tip, are constrained
to the controller instead of the drive chain
'''
srcConstraintResult.point >> destConstraintResult.point
srcConstraintResult.orient >> destConstraintResult.orient
def addControlsToCurve(name, crv=None,
spec={'shape': 'sphere', 'size': 10, 'color': 'blue 0.22'} ): # noqa e128
'''
Given a curve, make a control sphere at each CV.
:return: List of newly made controls.
'''
if not crv:
crv = selected()[0]
controls = []
for i, cv in enumerate(crv.cv):
#l = control.sphere( '{0}{1:0>2}'.format( name, i+1), size, 'blue', type=control.SPLINE )
shape = controllerShape.build('{0}{1:0>2}'.format(name, i + 1), spec, type=controllerShape.ControlType.SPLINE)
core.dagObj.moveTo( shape, cv )
handle = cluster(cv)[1]
handle.setParent(shape)
hide(handle)
controls.append(shape)
return controls
| {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/rigging/_util.py",
"copies": "1",
"size": "34335",
"license": "bsd-3-clause",
"hash": -7297588423068803000,
"line_mean": 30.6160220994,
"line_max": 135,
"alpha_frac": 0.5744575506,
"autogenerated": false,
"ratio": 3.551774076755974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9474749482184942,
"avg_score": 0.0302964290342064,
"num_lines": 1086
} |
from __future__ import absolute_import, division, print_function
import collections
import importlib
import gc
import gevent
import hashlib
import math
import os
import sys
import threading
import traceback
import uuid
import six
class UndefinedType(object):
def __repr__(self):
return "Undefined"
def __bool__(self):
return False
def __nonzero__(self):
return False
Undefined = UndefinedType()
def import_object(module_name, object_path=None):
if not object_path:
if ':' not in module_name:
raise ValueError("cannot import object %r" % module_name)
module_name, object_path = module_name.split(':')
mod = importlib.import_module(module_name)
obj = mod
for objname in object_path.split('.'):
obj = getattr(obj, objname)
return obj
def make_id():
return uuid.uuid4().hex
def hash_id(*bits):
return hashlib.md5(six.text_type(bits).encode('utf-8')).hexdigest()
_sqrt2 = math.sqrt(2)
class Accumulator(object):
def __init__(self):
self.n = 0
self.sum = 0
self.square_sum = 0
self._mean = None
self._stddev = None
def add(self, value):
self.n += 1
self.sum += value
self.square_sum += value * value
self._mean = None
self._stddev = None
def remove(self, value):
self.n -= 1
self.sum -= value
self.square_sum -= value * value
self._mean = None
self._stddev = None
@property
def mean(self):
if not self.n:
return 0.
if self._mean is None:
self._mean = self.sum / self.n
return self._mean
@property
def stddev(self):
if not self.n:
return 0.
if self._stddev is None:
mean = self.mean
self._stddev = math.sqrt(self.square_sum / self.n - mean * mean)
return self._stddev
@property
def stats(self):
return {'mean': self.mean, 'stddev': self.stddev, 'n': self.n}
class SampleWindow(Accumulator):
def __init__(self, n=100, factor=1):
super(SampleWindow, self).__init__()
self.size = n
self.factor = factor
self.values = collections.deque([])
self.total = Accumulator()
def __len__(self):
return len(self.values)
def is_full(self):
return len(self.values) == self.size
def add(self, value):
value = value * self.factor
super(SampleWindow, self).add(value)
self.total.add(value)
if self.is_full():
self.remove(self.values.popleft())
self.values.append(value)
def p(self, value):
"""
returns the probability for samples greater than `value` given a normal
distribution with mean and standard deviation derived from this window.
"""
if self.stddev == 0:
return 1. if value == self.mean else 0.
return 1 - math.erf(abs(value * self.factor - self.mean) / (self.stddev * _sqrt2))
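# Illustrative usage sketch (added; not part of the original module):
# SampleWindow keeps running stats over the last `n` samples while `total`
# accumulates over everything it has ever seen.
def _example_sample_window():
    window = SampleWindow(n=3)
    for value in (1.0, 2.0, 3.0, 4.0):
        window.add(value)                  # only the last three values stay in the window
    return window.mean, window.total.mean  # (3.0, 2.5)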
def get_greenlets():
for object in gc.get_objects():
if isinstance(object, gevent.Greenlet):
yield object
def get_greenlets_frames():
for greenlet in get_greenlets():
yield str(greenlet), greenlet.gr_frame
def get_threads_frames():
threads = {thread.ident: thread.name for thread in threading.enumerate()}
for ident, frame in sys._current_frames().items():
name = threads.get(ident)
if name:
yield '%s:%s' % (ident, name), frame
def format_stack(frame):
tb = traceback.format_stack(frame)
return ''.join(tb)
def dump_stacks(output=print):
output('PID: %s' % os.getpid())
output('Threads')
for i, (name, frame) in enumerate(get_threads_frames()):
output('Thread #%d: %s' % (i, name))
output(format_stack(frame))
output('Greenlets')
for i, (name, frame) in enumerate(get_greenlets_frames()):
output('Greenlet #%d: %s' % (i, name))
output(format_stack(frame))
| {
"repo_name": "vpikulik/lymph",
"path": "lymph/utils/__init__.py",
"copies": "3",
"size": "4048",
"license": "apache-2.0",
"hash": 6311165038677819000,
"line_mean": 23.6829268293,
"line_max": 90,
"alpha_frac": 0.5886857708,
"autogenerated": false,
"ratio": 3.790262172284644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5878947943084645,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import collections
import importlib
import gc
import gevent
import math
import os
import sys
import threading
import traceback
import uuid
class UndefinedType(object):
def __repr__(self):
return "Undefined"
def __bool__(self):
return False
def __nonzero__(self):
return False
Undefined = UndefinedType()
def import_object(module_name, object_path=None):
if not object_path:
if ':' not in module_name:
raise ValueError("cannot import object %r" % module_name)
module_name, object_path = module_name.split(':')
mod = importlib.import_module(module_name)
obj = mod
for objname in object_path.split('.'):
obj = getattr(obj, objname)
return obj
def make_id():
return uuid.uuid4().hex
_sqrt2 = math.sqrt(2)
class Accumulator(object):
def __init__(self):
self.n = 0
self.sum = 0
self.square_sum = 0
self._mean = None
self._stddev = None
def add(self, value):
self.n += 1
self.sum += value
self.square_sum += value * value
self._mean = None
self._stddev = None
def remove(self, value):
self.n -= 1
self.sum -= value
self.square_sum -= value * value
self._mean = None
self._stddev = None
@property
def mean(self):
if not self.n:
return 0.
if self._mean is None:
self._mean = self.sum / self.n
return self._mean
@property
def stddev(self):
if not self.n:
return 0.
if self._stddev is None:
mean = self.mean
self._stddev = math.sqrt(self.square_sum / self.n - mean * mean)
return self._stddev
@property
def stats(self):
return {'mean': self.mean, 'stddev': self.stddev, 'n': self.n}
class SampleWindow(Accumulator):
def __init__(self, n=100, factor=1):
super(SampleWindow, self).__init__()
self.size = n
self.factor = factor
self.values = collections.deque([])
self.total = Accumulator()
def __len__(self):
return len(self.values)
def is_full(self):
return len(self.values) == self.size
def add(self, value):
value = value * self.factor
super(SampleWindow, self).add(value)
self.total.add(value)
if self.is_full():
self.remove(self.values.popleft())
self.values.append(value)
def p(self, value):
"""
returns the probability for samples greater than `value` given a normal
distribution with mean and standard deviation derived from this window.
"""
if self.stddev == 0:
return 1. if value == self.mean else 0.
return 1 - math.erf(abs(value * self.factor - self.mean) / (self.stddev * _sqrt2))
def get_greenlets():
for object in gc.get_objects():
if isinstance(object, gevent.Greenlet):
yield object
def get_greenlets_frames():
for greenlet in get_greenlets():
yield str(greenlet), greenlet.gr_frame
def get_threads_frames():
threads = {thread.ident: thread.name for thread in threading.enumerate()}
for ident, frame in sys._current_frames().items():
name = threads.get(ident)
if name:
yield '%s:%s' % (ident, name), frame
def format_stack(frame):
tb = traceback.format_stack(frame)
return ''.join(tb)
def dump_stacks(output=print):
output('PID: %s' % os.getpid())
output('Threads')
for i, (name, frame) in enumerate(get_threads_frames()):
output('Thread #%d: %s' % (i, name))
output(format_stack(frame))
output('Greenlets')
for i, (name, frame) in enumerate(get_greenlets_frames()):
output('Greenlet #%d: %s' % (i, name))
output(format_stack(frame))
| {
"repo_name": "mamachanko/lymph",
"path": "lymph/utils/__init__.py",
"copies": "7",
"size": "3927",
"license": "apache-2.0",
"hash": 1253384241574027800,
"line_mean": 24.0127388535,
"line_max": 90,
"alpha_frac": 0.5851795264,
"autogenerated": false,
"ratio": 3.816326530612245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006999370056694898,
"num_lines": 157
} |
from __future__ import absolute_import, division, print_function
import collections
import importlib
import inspect
import gc
import gevent
import math
import os
import re
import sys
import threading
import traceback
import uuid
class UndefinedType(object):
def __repr__(self):
return "Undefined"
def __bool__(self):
return False
def __nonzero__(self):
return False
Undefined = UndefinedType()
def import_object(module_name, object_path=None):
if not object_path:
if ':' not in module_name:
raise ValueError("cannot import object %r" % module_name)
module_name, object_path = module_name.split(':')
mod = importlib.import_module(module_name)
obj = mod
for objname in object_path.split('.'):
obj = getattr(obj, objname)
return obj
def make_id():
return uuid.uuid4().hex
_sqrt2 = math.sqrt(2)
class Accumulator(object):
def __init__(self):
self.n = 0
self.sum = 0
self.square_sum = 0
self._mean = None
self._stddev = None
def add(self, value):
self.n += 1
self.sum += value
self.square_sum += value * value
self._mean = None
self._stddev = None
def remove(self, value):
self.n -= 1
self.sum -= value
self.square_sum -= value * value
self._mean = None
self._stddev = None
@property
def mean(self):
if not self.n:
return 0.
if self._mean is None:
self._mean = self.sum / self.n
return self._mean
@property
def stddev(self):
if not self.n:
return 0.
if self._stddev is None:
mean = self.mean
self._stddev = math.sqrt(self.square_sum / self.n - mean * mean)
return self._stddev
@property
def stats(self):
return {'mean': self.mean, 'stddev': self.stddev, 'n': self.n}
class SampleWindow(Accumulator):
def __init__(self, n=100, factor=1):
super(SampleWindow, self).__init__()
self.size = n
self.factor = factor
self.values = collections.deque([])
self.total = Accumulator()
def __len__(self):
return len(self.values)
def is_full(self):
return len(self.values) == self.size
def add(self, value):
value = value * self.factor
super(SampleWindow, self).add(value)
self.total.add(value)
if self.is_full():
self.remove(self.values.popleft())
self.values.append(value)
def p(self, value):
"""
returns the probability for samples greater than `value` given a normal
distribution with mean and standard deviation derived from this window.
"""
if self.stddev == 0:
return 1. if value == self.mean else 0.
return 1 - math.erf(abs(value * self.factor - self.mean) / (self.stddev * _sqrt2))
def get_greenlets():
for object in gc.get_objects():
if isinstance(object, gevent.Greenlet):
yield object
def get_greenlets_frames():
for greenlet in get_greenlets():
yield str(greenlet), greenlet.gr_frame
def get_threads_frames():
threads = {thread.ident: thread.name for thread in threading.enumerate()}
for ident, frame in sys._current_frames().items():
name = threads.get(ident)
if name:
yield '%s:%s' % (ident, name), frame
def format_stack(frame):
tb = traceback.format_stack(frame)
return ''.join(tb)
def dump_stacks(output=print):
output('PID: %s' % os.getpid())
output('Threads')
for i, (name, frame) in enumerate(get_threads_frames()):
output('Thread #%d: %s' % (i, name))
output(format_stack(frame))
output('Greenlets')
for i, (name, frame) in enumerate(get_greenlets_frames()):
output('Greenlet #%d: %s' % (i, name))
output(format_stack(frame))
| {
"repo_name": "alazaro/lymph",
"path": "lymph/utils/__init__.py",
"copies": "1",
"size": "3952",
"license": "apache-2.0",
"hash": 7529416317088365000,
"line_mean": 23.8553459119,
"line_max": 90,
"alpha_frac": 0.586791498,
"autogenerated": false,
"ratio": 3.818357487922705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4905148985922705,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import collections
import itertools as it
import operator
import warnings
import numpy as np
import pandas as pd
from .core import (DataFrame, Series, aca, map_partitions, merge,
new_dd_object, no_default, split_out_on_index)
from .methods import drop_columns
from .shuffle import shuffle
from .utils import make_meta, insert_meta_param_description, raise_on_meta_error
from ..base import tokenize
from ..utils import derived_from, M, funcname
# #############################################
#
# GroupBy implementation notes
#
# Dask groupby supports reductions, i.e., mean, sum and the like, and apply. The
# former do not shuffle the data and are efficiently implemented as tree
# reductions. The latter is implemented by shuffling the underlying partitions
# such that all items of a group can be found in the same partition.
#
# The argument to ``.groupby``, the index, can be a ``str``, ``dd.DataFrame``,
# ``dd.Series``, or a list thereof. In operations on the grouped object, the
# divisions of the grouped object and the items of index have to align.
# Currently, there is no support to shuffle the index values as part of the
# groupby operation. Therefore, the alignment has to be guaranteed by the
# caller.
#
# To operate on matching partitions, most groupby operations exploit the
# corresponding support in ``apply_concat_apply``. Specifically, this function
# operates on matching partitions of frame-like objects passed as varargs.
#
# After the initial chunk step, the passed index is implicitly passed along to
# subsequent operations as the index of the partitions. Groupby operations on
# the individual partitions can then access the index via the ``levels``
# parameter of the ``groupby`` function. The correct argument is determined by
# the ``_determine_levels`` function.
#
# To minimize overhead, series in an index that were obtained by getitem on the
# object to group are not passed as series to the various operations, but as
# column keys. This transformation is implemented as ``_normalize_index``.
#
# #############################################
def _determine_levels(index):
"""Determine the correct levels argument to groupby.
"""
if isinstance(index, (tuple, list)) and len(index) > 1:
return list(range(len(index)))
else:
return 0
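# Added note: e.g. _determine_levels('a') == 0 while
# _determine_levels(['a', 'b']) == [0, 1], matching the single- vs
# multi-level index produced by the chunk step.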
def _normalize_index(df, index):
"""Replace series with column names in an index wherever possible.
"""
if not isinstance(df, DataFrame):
return index
elif isinstance(index, list):
return [_normalize_index(df, col) for col in index]
elif (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
return index.name
elif (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
return list(index.columns)
else:
return index
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
if isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
if columns is not None:
if isinstance(columns, (tuple, list, set, pd.Index)):
columns = list(columns)
return grouped[columns]
return grouped
def _is_aligned(df, by):
"""Check if `df` and `by` have aligned indices"""
if isinstance(by, (pd.Series, pd.DataFrame)):
return df.index.equals(by.index)
elif isinstance(by, (list, tuple)):
return all(_is_aligned(df, i) for i in by)
else:
return True
def _groupby_raise_unaligned(df, **kwargs):
"""Groupby, but raise if df and `by` key are unaligned.
Pandas supports grouping by a column that doesn't align with the input
frame/series/index. However, the reindexing this causes doesn't seem to be
threadsafe, and can result in incorrect results. Since grouping by an
unaligned key is generally a bad idea, we just error loudly in dask.
For more information see pandas GH issue #15244 and Dask GH issue #1876."""
by = kwargs.get('by', None)
if by is not None and not _is_aligned(df, by):
msg = ("Grouping by an unaligned index is unsafe and unsupported.\n"
"This can be caused by filtering only one of the object or\n"
"grouping key. For example, the following works in pandas,\n"
"but not in dask:\n"
"\n"
"df[df.foo < 0].groupby(df.bar)\n"
"\n"
"This can be avoided by either filtering beforehand, or\n"
"passing in the name of the column instead:\n"
"\n"
"df2 = df[df.foo < 0]\n"
"df2.groupby(df2.bar)\n"
"# or\n"
"df[df.foo < 0].groupby('bar')\n"
"\n"
"For more information see dask GH issue #1876.")
raise ValueError(msg)
return df.groupby(**kwargs)
def _groupby_slice_apply(df, grouper, key, func):
# No need to use raise if unaligned here - this is only called after
# shuffling, which makes everything aligned already
g = df.groupby(grouper)
if key:
g = g[key]
return g.apply(func)
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = _groupby_raise_unaligned(df, by=by_key)
if get_key in grouped.groups:
if isinstance(df, pd.DataFrame):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
# to create empty DataFrame/Series, which has the same
# dtype as the original
if isinstance(df, pd.DataFrame):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
###############################################################
# Aggregation
###############################################################
def _groupby_aggregate(df, aggfunc=None, levels=None):
return aggfunc(df.groupby(level=levels, sort=False))
def _apply_chunk(df, *index, **kwargs):
func = kwargs.pop('chunk')
columns = kwargs.pop('columns')
g = _groupby_raise_unaligned(df, by=index)
if isinstance(df, pd.Series) or columns is None:
return func(g)
else:
if isinstance(columns, (tuple, list, set, pd.Index)):
columns = list(columns)
return func(g[columns])
def _var_chunk(df, *index):
if isinstance(df, pd.Series):
df = df.to_frame()
g = _groupby_raise_unaligned(df, by=index)
x = g.sum()
x2 = g.agg(lambda x: (x**2).sum()).rename(columns=lambda c: c + '-x2')
n = g.count().rename(columns=lambda c: c + '-count')
return pd.concat([x, x2, n], axis=1)
def _var_combine(g, levels):
return g.groupby(level=levels, sort=False).sum()
def _var_agg(g, levels, ddof):
g = g.groupby(level=levels, sort=False).sum()
nc = len(g.columns)
x = g[g.columns[:nc // 3]]
x2 = g[g.columns[nc // 3:2 * nc // 3]].rename(columns=lambda c: c[:-3])
n = g[g.columns[-nc // 3:]].rename(columns=lambda c: c[:-6])
# TODO: replace with _finalize_var?
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert isinstance(result, pd.DataFrame)
return result
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, *index, **kwargs):
levels = kwargs.pop('levels')
name = kwargs.pop('name')
g = _groupby_raise_unaligned(df, by=index)
grouped = g[[name]].apply(pd.DataFrame.drop_duplicates)
# we set the index here to force a possibly duplicate index
# for our reduce step
if isinstance(levels, list):
grouped.index = pd.MultiIndex.from_arrays([
grouped.index.get_level_values(level=level) for level in levels
])
else:
grouped.index = grouped.index.get_level_values(level=levels)
return grouped
def _nunique_df_combine(df, levels):
result = df.groupby(level=levels, sort=False).apply(pd.DataFrame.drop_duplicates)
if isinstance(levels, list):
result.index = pd.MultiIndex.from_arrays([
result.index.get_level_values(level=level) for level in levels
])
else:
result.index = result.index.get_level_values(level=levels)
return result
def _nunique_df_aggregate(df, levels, name):
return df.groupby(level=levels, sort=False)[name].nunique()
def _nunique_series_chunk(df, *index, **_ignored_):
# convert series to data frame, then hand over to dataframe code path
assert isinstance(df, pd.Series)
df = df.to_frame()
kwargs = dict(name=df.columns[0], levels=_determine_levels(index))
return _nunique_df_chunk(df, *index, **kwargs)
###############################################################
# Aggregate support
#
# Aggregate is implemented as:
#
# 1. group-by-aggregate all partitions into intermediate values
# 2. collect all partitions into a single partition
# 3. group-by-aggregate the result into intermediate values
# 4. transform all intermediate values into the result
#
# In Step 1 and 3 the dataframe is grouped on the same columns.
#
###############################################################
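# A hedged sketch of these four steps in terms of this module's helpers
# (the spec, the grouping column 'x' and the pandas frame ``pdf`` are
# hypothetical; intermediate column names carry a hash from ``_make_agg_id``):
#
#   spec = _normalize_spec({'y': 'mean'}, ['y'])
#   chunk_funcs, agg_funcs, finalizers = _build_agg_args(spec)
#   part = _groupby_apply_funcs(pdf, 'x', funcs=chunk_funcs)        # step 1
#   combined = pd.concat([part, ...])                               # step 2
#   agged = _groupby_apply_funcs(combined, funcs=agg_funcs,
#                                level=0)                           # step 3
#   result = _agg_finalize(agged, finalizers)                       # step 4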
def _make_agg_id(func, column):
return '{!s}-{!s}-{}'.format(func, column, tokenize(func, column))
def _normalize_spec(spec, non_group_columns):
"""
Return a list of ``(result_column, func, input_column)`` tuples.
Spec can be
- a function
- a list of functions
- a dictionary that maps input-columns to functions
- a dictionary that maps input-columns to a lists of functions
- a dictionary that maps input-columns to a dictionaries that map
output-columns to functions.
The non-group columns are a list of all column names that are not used in
the groupby operation.
Usually, the result columns are multi-level names, returned as tuples.
If only a single function is supplied, or a dictionary mapping columns
to single functions, simple names are returned as strings (see the first
two examples below).
Examples
--------
>>> _normalize_spec('mean', ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'count', 'b')]
>>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \
(('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \
(('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \
(('b', 'count'), 'count', 'b')]
>>> spec = collections.OrderedDict()
>>> spec['a'] = ['mean', 'size']
>>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \
(('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]
"""
if not isinstance(spec, dict):
spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))
res = []
if isinstance(spec, dict):
for input_column, subspec in spec.items():
if isinstance(subspec, dict):
res.extend(((input_column, result_column), func, input_column)
for result_column, func in subspec.items())
else:
if not isinstance(subspec, list):
subspec = [subspec]
res.extend(((input_column, funcname(func)), func, input_column)
for func in subspec)
else:
raise ValueError("unsupported agg spec of type {}".format(type(spec)))
compounds = (list, tuple, dict)
use_flat_columns = not any(isinstance(subspec, compounds)
for subspec in spec.values())
if use_flat_columns:
res = [(input_col, func, input_col) for (_, func, input_col) in res]
return res
def _build_agg_args(spec):
"""
Create transformation functions for a normalized aggregate spec.
Parameters
----------
spec: a list of (result-column, aggregation-function, input-column) triples.
To work with all argument forms understood by pandas use
``_normalize_spec`` to normalize the argument before passing it on to
``_build_agg_args``.
Returns
-------
chunk_funcs: a list of (intermediate-column, function, keyword) triples
that are applied on grouped chunks of the initial dataframe.
agg_funcs: a list of (intermediate-column, function, keyword) triples that
are applied on the grouped concatenation of the preprocessed chunks.
finalizers: a list of (result-column, function, keyword) triples that are
applied after the ``agg_funcs``. They are used to create final results
from intermediate representations.
"""
known_np_funcs = {np.min: 'min', np.max: 'max'}
chunks = {}
aggs = {}
finalizers = []
for (result_column, func, input_column) in spec:
func = funcname(known_np_funcs.get(func, func))
impls = _build_agg_args_single(result_column, func, input_column)
# overwrite existing result-columns, generate intermediates only once
chunks.update((spec[0], spec) for spec in impls['chunk_funcs'])
aggs.update((spec[0], spec) for spec in impls['aggregate_funcs'])
finalizers.append(impls['finalizer'])
chunks = sorted(chunks.values())
aggs = sorted(aggs.values())
return chunks, aggs, finalizers
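# A hedged example of the returned shape for a single 'sum' spec (the column
# name 'y' is hypothetical; the intermediate names additionally carry a
# tokenize hash, abbreviated here as <hash>):
#
#   chunk_funcs, agg_funcs, finalizers = _build_agg_args([('y', 'sum', 'y')])
#   # chunk_funcs ~ [('sum-y-<hash>', _apply_func_to_column,
#   #                 {'column': 'y', 'func': M.sum})]
#   # agg_funcs   ~ [('sum-y-<hash>', _apply_func_to_column,
#   #                 {'column': 'sum-y-<hash>', 'func': M.sum})]
#   # finalizers  ~ [('y', operator.itemgetter('sum-y-<hash>'), {})]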
def _build_agg_args_single(result_column, func, input_column):
simple_impl = {
'sum': (M.sum, M.sum),
'min': (M.min, M.min),
'max': (M.max, M.max),
'count': (M.count, M.sum),
'size': (M.size, M.sum),
}
if func in simple_impl.keys():
return _build_agg_args_simple(result_column, func, input_column,
simple_impl[func])
elif func == 'var':
return _build_agg_args_var(result_column, func, input_column)
elif func == 'std':
return _build_agg_args_std(result_column, func, input_column)
elif func == 'mean':
return _build_agg_args_mean(result_column, func, input_column)
else:
raise ValueError("unknown aggregate {}".format(func))
def _build_agg_args_simple(result_column, func, input_column, impl_pair):
intermediate = _make_agg_id(func, input_column)
chunk_impl, agg_impl = impl_pair
return dict(
chunk_funcs=[(intermediate, _apply_func_to_column,
dict(column=input_column, func=chunk_impl))],
aggregate_funcs=[(intermediate, _apply_func_to_column,
dict(column=intermediate, func=agg_impl))],
finalizer=(result_column, operator.itemgetter(intermediate), dict()),
)
def _build_agg_args_var(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_sum2 = _make_agg_id('sum2', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
(int_sum2, _compute_sum_of_squares,
dict(column=input_column)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count, int_sum2)
],
finalizer=(result_column, _finalize_var,
dict(sum_column=int_sum, count_column=int_count,
sum2_column=int_sum2)),
)
def _build_agg_args_std(result_column, func, input_column):
impls = _build_agg_args_var(result_column, func, input_column)
result_column, _, kwargs = impls['finalizer']
impls['finalizer'] = (result_column, _finalize_std, kwargs)
return impls
def _build_agg_args_mean(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count)
],
finalizer=(result_column, _finalize_mean,
dict(sum_column=int_sum, count_column=int_count)),
)
def _groupby_apply_funcs(df, *index, **kwargs):
"""
Group a dataframe and apply multiple aggregation functions.
Parameters
----------
df: pandas.DataFrame
The dataframe to work on.
index: list of groupers
If given, they are added to the keyword arguments as the ``by``
argument.
funcs: list of (result-column, function, keyword-argument) triples
The list of functions that are applied on the grouped data frame.
Has to be passed as a keyword argument.
kwargs:
All keyword arguments except ``funcs`` are passed verbatim to the groupby
operation of the dataframe.
Returns
-------
aggregated:
the aggregated dataframe.
"""
if len(index):
kwargs.update(by=list(index))
funcs = kwargs.pop('funcs')
grouped = _groupby_raise_unaligned(df, **kwargs)
result = collections.OrderedDict()
for result_column, func, func_kwargs in funcs:
result[result_column] = func(grouped, **func_kwargs)
return pd.DataFrame(result)
def _compute_sum_of_squares(grouped, column):
base = grouped[column] if column is not None else grouped
return base.apply(lambda x: (x ** 2).sum())
def _agg_finalize(df, funcs):
result = collections.OrderedDict()
for result_column, func, kwargs in funcs:
result[result_column] = func(df, **kwargs)
return pd.DataFrame(result)
def _apply_func_to_column(df_like, column, func):
if column is None:
return func(df_like)
return func(df_like[column])
def _finalize_mean(df, sum_column, count_column):
return df[sum_column] / df[count_column]
def _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):
n = df[count_column]
x = df[sum_column]
x2 = df[sum2_column]
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
return result
def _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):
result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)
return np.sqrt(result)
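# A quick numeric check of the moment formula shared by ``_var_agg`` and
# ``_finalize_var`` (hypothetical data, single group): for x = [1, 2, 3, 4]
# the chunks deliver sum = 10, sum of squares = 30 and count = 4, so with
# ddof=1
#
#   var = (30 - 10 ** 2 / 4) / (4 - 1)   # == 5 / 3 ~= 1.6667
#
# which matches pd.Series([1, 2, 3, 4]).var().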
def _cum_agg_aligned(part, cum_last, index, columns, func, initial):
align = cum_last.reindex(part.set_index(index).index, fill_value=initial)
align.index = part.index
return func(part[columns], align)
def _cum_agg_filled(a, b, func, initial):
union = a.index.union(b.index)
return func(a.reindex(union, fill_value=initial),
b.reindex(union, fill_value=initial), fill_value=initial)
def _cumcount_aggregate(a, b, fill_value=None):
return a.add(b, fill_value=fill_value) + 1
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
by: str, list or Series
The key for grouping
slice: str, list
The slice keys applied to GroupBy result
"""
def __init__(self, df, by=None, slice=None):
assert isinstance(df, (DataFrame, Series))
self.obj = df
# grouping key passed via groupby method
self.index = _normalize_index(df, by)
if isinstance(self.index, list):
do_index_partition_align = all(
item.divisions == df.divisions if isinstance(item, Series) else True
for item in self.index
)
elif isinstance(self.index, Series):
do_index_partition_align = df.divisions == self.index.divisions
else:
do_index_partition_align = True
if not do_index_partition_align:
raise NotImplementedError("The grouped object and index of the "
"groupby must have the same divisions.")
# slicing key applied to _GroupBy instance
self._slice = slice
if isinstance(self.index, list):
index_meta = [item._meta if isinstance(item, Series) else item for item in self.index]
elif isinstance(self.index, Series):
index_meta = self.index._meta
else:
index_meta = self.index
self._meta = self.obj._meta.groupby(index_meta)
@property
def _meta_nonempty(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.
"""
sample = self.obj._meta_nonempty
if isinstance(self.index, list):
index_meta = [item._meta_nonempty if isinstance(item, Series) else item for item in self.index]
elif isinstance(self.index, Series):
index_meta = self.index._meta_nonempty
else:
index_meta = self.index
grouped = sample.groupby(index_meta)
return _maybe_slice(grouped, self._slice)
def _aca_agg(self, token, func, aggfunc=None, split_every=None,
split_out=1):
if aggfunc is None:
aggfunc = func
meta = func(self._meta)
columns = meta.name if isinstance(meta, pd.Series) else meta.columns
token = self._token_prefix + token
levels = _determine_levels(self.index)
return aca([self.obj, self.index] if not isinstance(self.index, list) else [self.obj] + self.index,
chunk=_apply_chunk,
chunk_kwargs=dict(chunk=func, columns=columns),
aggregate=_groupby_aggregate,
meta=meta, token=token, split_every=split_every,
aggregate_kwargs=dict(aggfunc=aggfunc, levels=levels),
split_out=split_out, split_out_setup=split_out_on_index)
def _cum_agg(self, token, chunk, aggregate, initial):
""" Wrapper for cumulative groupby operation """
meta = chunk(self._meta)
columns = meta.name if isinstance(meta, pd.Series) else meta.columns
index = self.index if isinstance(self.index, list) else [self.index]
name = self._token_prefix + token
name_part = name + '-map'
name_last = name + '-take-last'
name_cum = name + '-cum-last'
# accumulate within each partition
cumpart_raw = map_partitions(_apply_chunk, self.obj, *index,
chunk=chunk,
columns=columns,
token=name_part,
meta=meta)
cumpart_ext = (cumpart_raw.to_frame()
if isinstance(meta, pd.Series)
else cumpart_raw).assign(**{i: self.obj[i]
for i in index})
cumlast = map_partitions(_apply_chunk, cumpart_ext, *index,
columns=0 if columns is None else columns,
chunk=M.last,
meta=meta,
token=name_last)
# combine each partition's cumulative result with the previous partition's last value
dask = {}
dask[(name, 0)] = (cumpart_raw._name, 0)
for i in range(1, self.obj.npartitions):
# store each cumulative step in the graph to avoid recomputation
if i == 1:
dask[(name_cum, i)] = (cumlast._name, i - 1)
else:
# aggregate with the previous cumulative result
dask[(name_cum, i)] = (_cum_agg_filled,
(name_cum, i - 1),
(cumlast._name, i - 1),
aggregate, initial)
dask[(name, i)] = (_cum_agg_aligned,
(cumpart_ext._name, i), (name_cum, i),
index, 0 if columns is None else columns,
aggregate, initial)
return new_dd_object(merge(dask, cumpart_ext.dask, cumlast.dask),
name, chunk(self._meta), self.obj.divisions)
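# A hedged usage sketch of the cumulative path (frame and columns are
# hypothetical); no shuffle is needed because results are chained
# partition by partition:
#
#   ddf.groupby('x').y.cumsum().compute()
#   # matches: pdf.groupby('x').y.cumsum()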
@derived_from(pd.core.groupby.GroupBy)
def cumsum(self, axis=0):
if axis:
return self.obj.cumsum(axis=axis)
else:
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=M.add,
initial=0)
@derived_from(pd.core.groupby.GroupBy)
def cumprod(self, axis=0):
if axis:
return self.obj.cumprod(axis=axis)
else:
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=M.mul,
initial=1)
@derived_from(pd.core.groupby.GroupBy)
def cumcount(self, axis=None):
return self._cum_agg('cumcount',
chunk=M.cumcount,
aggregate=_cumcount_aggregate,
initial=-1)
@derived_from(pd.core.groupby.GroupBy)
def sum(self, split_every=None, split_out=1):
return self._aca_agg(token='sum', func=M.sum, split_every=split_every,
split_out=split_out)
@derived_from(pd.core.groupby.GroupBy)
def min(self, split_every=None, split_out=1):
return self._aca_agg(token='min', func=M.min, split_every=split_every,
split_out=split_out)
@derived_from(pd.core.groupby.GroupBy)
def max(self, split_every=None, split_out=1):
return self._aca_agg(token='max', func=M.max, split_every=split_every,
split_out=split_out)
@derived_from(pd.core.groupby.GroupBy)
def count(self, split_every=None, split_out=1):
return self._aca_agg(token='count', func=M.count,
aggfunc=M.sum, split_every=split_every,
split_out=split_out)
@derived_from(pd.core.groupby.GroupBy)
def mean(self, split_every=None, split_out=1):
return (self.sum(split_every=split_every, split_out=split_out) /
self.count(split_every=split_every, split_out=split_out))
@derived_from(pd.core.groupby.GroupBy)
def size(self, split_every=None, split_out=1):
return self._aca_agg(token='size', func=M.size, aggfunc=M.sum,
split_every=split_every, split_out=split_out)
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1, split_every=None, split_out=1):
levels = _determine_levels(self.index)
result = aca([self.obj, self.index] if not isinstance(self.index, list) else [self.obj] + self.index,
chunk=_var_chunk,
aggregate=_var_agg, combine=_var_combine,
token=self._token_prefix + 'var',
aggregate_kwargs={'ddof': ddof, 'levels': levels},
combine_kwargs={'levels': levels},
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1, split_every=None, split_out=1):
v = self.var(ddof, split_every=split_every, split_out=split_out)
result = map_partitions(np.sqrt, v, meta=v)
return result
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
meta = self._meta.obj
if isinstance(meta, pd.DataFrame) and self._slice is not None:
meta = meta[self._slice]
columns = meta.columns if isinstance(meta, pd.DataFrame) else meta.name
return map_partitions(_groupby_get_group, self.obj, self.index, key,
columns, meta=meta, token=token)
def aggregate(self, arg, split_every, split_out=1):
if isinstance(self.obj, DataFrame):
if isinstance(self.index, tuple) or np.isscalar(self.index):
group_columns = {self.index}
elif isinstance(self.index, list):
group_columns = {i for i in self.index
if isinstance(i, tuple) or np.isscalar(i)}
else:
group_columns = set()
if self._slice:
# pandas doesn't exclude the grouping column in a SeriesGroupBy
# like df.groupby('a')['a'].agg(...)
non_group_columns = self._slice
if not isinstance(non_group_columns, list):
non_group_columns = [non_group_columns]
else:
# NOTE: this step relies on the index normalization to replace
# series with their name in an index.
non_group_columns = [col for col in self.obj.columns
if col not in group_columns]
spec = _normalize_spec(arg, non_group_columns)
elif isinstance(self.obj, Series):
# implementation detail: if self.obj is a series, a pseudo column
# None is used to denote the series itself. This pseudo column is
# removed from the result columns before passing the spec along.
spec = _normalize_spec({None: arg}, [])
spec = [(result_column, func, input_column)
for ((_, result_column), func, input_column) in spec]
else:
raise ValueError("aggregate on unknown object {}".format(self.obj))
chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
if not isinstance(self.index, list):
chunk_args = [self.obj, self.index]
else:
chunk_args = [self.obj] + self.index
obj = aca(chunk_args,
chunk=_groupby_apply_funcs,
chunk_kwargs=dict(funcs=chunk_funcs),
aggregate=_groupby_apply_funcs,
aggregate_kwargs=dict(funcs=aggregate_funcs, level=levels),
combine=_groupby_apply_funcs,
combine_kwargs=dict(funcs=aggregate_funcs, level=levels),
token='aggregate', split_every=split_every,
split_out=split_out, split_out_setup=split_out_on_index)
return map_partitions(_agg_finalize, obj, token='aggregate-finalize',
funcs=finalizers)
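# A hedged usage sketch (frame and columns are hypothetical); nested specs
# yield multi-level result columns, mirroring pandas:
#
#   ddf.groupby('x').agg({'y': ['sum', 'mean'], 'z': 'max'}).compute()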
@insert_meta_param_description(pad=12)
def apply(self, func, meta=no_default):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. The user should provide output metadata.
2. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
func: function
Function to apply
$META
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
with raise_on_meta_error("groupby.apply({0})".format(funcname(func))):
meta = self._meta_nonempty.apply(func)
meta = make_meta(meta)
df = self.obj
if isinstance(self.index, DataFrame): # add index columns to dataframe
df2 = df.assign(**{'_index_' + c: self.index[c]
for c in self.index.columns})
index = self.index
elif isinstance(self.index, Series):
df2 = df.assign(_index=self.index)
index = self.index
elif (isinstance(self.index, list) and
any(isinstance(item, Series) for item in self.index)):
raise NotImplementedError("groupby-apply with a multiple Series "
"is currently not supported")
else:
df2 = df
index = df[self.index]
df3 = shuffle(df2, index) # shuffle dataframe and index
if isinstance(self.index, DataFrame): # extract index from dataframe
cols = ['_index_' + c for c in self.index.columns]
index2 = df3[cols]
if isinstance(meta, pd.DataFrame):
df4 = df3.map_partitions(drop_columns, cols, meta.columns.dtype)
else:
df4 = df3.drop(cols, axis=1)
elif isinstance(self.index, Series):
index2 = df3['_index']
index2.name = self.index.name
if isinstance(meta, pd.DataFrame):
df4 = df3.map_partitions(drop_columns, '_index',
meta.columns.dtype)
else:
df4 = df3.drop('_index', axis=1)
else:
df4 = df3
index2 = self.index
# Perform embarrassingly parallel groupby-apply
df5 = map_partitions(_groupby_slice_apply, df4, index2,
self._slice, func, meta=meta)
return df5
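# A hedged usage sketch (frame, columns and dtypes are hypothetical);
# passing ``meta`` explicitly avoids the inference warning above:
#
#   ddf.groupby('x').apply(lambda g: g.assign(y2=g.y ** 2),
#                          meta={'x': 'i8', 'y': 'f8', 'y2': 'f8'}).compute()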
class DataFrameGroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(self.obj, by=self.index, slice=key)
else:
g = SeriesGroupBy(self.obj, by=self.index, slice=key)
# error is raised from pandas
g._meta = g._meta[key]
return g
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.obj.columns))))
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def aggregate(self, arg, split_every=None, split_out=1):
if arg == 'size':
return self.size()
return super(DataFrameGroupBy, self).aggregate(arg, split_every=split_every, split_out=split_out)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def agg(self, arg, split_every=None, split_out=1):
return self.aggregate(arg, split_every=split_every, split_out=split_out)
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, by=None, slice=None):
# for any non-Series object, raise the pandas-compatible error message
if isinstance(df, Series):
if isinstance(by, Series):
pass
elif isinstance(by, list):
if len(by) == 0:
raise ValueError("No group keys passed!")
non_series_items = [item for item in by
if not isinstance(item, Series)]
# raise error from pandas, if applicable
df._meta.groupby(non_series_items)
else:
# raise error from pandas, if applicable
df._meta.groupby(by)
super(SeriesGroupBy, self).__init__(df, by=by, slice=slice)
def nunique(self, split_every=None, split_out=1):
name = self._meta.obj.name
levels = _determine_levels(self.index)
if isinstance(self.obj, DataFrame):
chunk = _nunique_df_chunk
else:
chunk = _nunique_series_chunk
return aca([self.obj, self.index] if not isinstance(self.index, list) else [self.obj] + self.index,
chunk=chunk,
aggregate=_nunique_df_aggregate,
combine=_nunique_df_combine,
token='series-groupby-nunique',
chunk_kwargs={'levels': levels, 'name': name},
aggregate_kwargs={'levels': levels, 'name': name},
combine_kwargs={'levels': levels},
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
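# A hedged usage sketch (frame and columns are hypothetical):
#
#   ddf.groupby('x').y.nunique().compute()
#   # matches: pdf.groupby('x').y.nunique()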
@derived_from(pd.core.groupby.SeriesGroupBy)
def aggregate(self, arg, split_every=None, split_out=1):
# short-circuit 'simple' aggregations
if (
not isinstance(arg, (list, dict)) and
arg in {'sum', 'mean', 'var', 'size', 'std', 'count'}
):
return getattr(self, arg)(split_every=split_every,
split_out=split_out)
result = super(SeriesGroupBy, self).aggregate(arg, split_every=split_every, split_out=split_out)
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.SeriesGroupBy)
def agg(self, arg, split_every=None, split_out=1):
return self.aggregate(arg, split_every=split_every, split_out=split_out)
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/groupby.py",
"copies": "1",
"size": "38664",
"license": "bsd-3-clause",
"hash": -8083008032709018000,
"line_mean": 35.406779661,
"line_max": 109,
"alpha_frac": 0.5759621353,
"autogenerated": false,
"ratio": 3.901513622603431,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9976377242365411,
"avg_score": 0.0002197031076039776,
"num_lines": 1062
} |
from __future__ import absolute_import, division, print_function
import collections
import itertools as it
import operator
import warnings
import numpy as np
import pandas as pd
from .core import DataFrame, Series, aca, map_partitions, no_default
from .shuffle import shuffle
from .utils import make_meta, insert_meta_param_description, raise_on_meta_error
from ..base import tokenize
from ..utils import derived_from, M, funcname
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
if isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
if columns is not None:
columns = columns if isinstance(columns, str) else list(columns)
return grouped[columns]
return grouped
def _groupby_slice_apply(df, grouper, key, func):
g = df.groupby(grouper)
if key:
g = g[key]
return g.apply(func)
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = df.groupby(by_key)
if get_key in grouped.groups:
if isinstance(df, pd.DataFrame):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
# to create empty DataFrame/Series, which has the same
# dtype as the original
if isinstance(df, pd.DataFrame):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
###############################################################
# Aggregation
###############################################################
def _groupby_aggregate(df, aggfunc=None, levels=None):
return aggfunc(df.groupby(level=levels))
def _apply_chunk(df, index, func, columns):
if isinstance(df, pd.Series) or columns is None:
return func(df.groupby(index))
else:
columns = columns if isinstance(columns, str) else list(columns)
return func(df.groupby(index)[columns])
def _var_chunk(df, index):
if isinstance(df, pd.Series):
df = df.to_frame()
g = df.groupby(index)
x = g.sum()
x2 = g.agg(lambda x: (x**2).sum()).rename(columns=lambda c: c + '-x2')
n = g.count().rename(columns=lambda c: c + '-count')
return pd.concat([x, x2, n], axis=1)
def _var_combine(g):
return g.groupby(level=0).sum()
def _var_agg(g, ddof):
g = g.groupby(level=0).sum()
nc = len(g.columns)
x = g[g.columns[:nc // 3]]
x2 = g[g.columns[nc // 3:2 * nc // 3]].rename(columns=lambda c: c[:-3])
n = g[g.columns[-nc // 3:]].rename(columns=lambda c: c[:-6])
# TODO: replace with _finalize_var?
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert isinstance(result, pd.DataFrame)
return result
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, index):
# we reset the index below to force a possibly duplicate index
# for our reduce step
grouped = df.groupby(index).apply(pd.DataFrame.drop_duplicates)
grouped.index = grouped.index.get_level_values(level=0)
return grouped
def _nunique_df_combine(df):
result = df.groupby(level=0).apply(pd.DataFrame.drop_duplicates)
result.index = result.index.get_level_values(level=0)
return result
def _nunique_df_aggregate(df, name):
return df.groupby(level=0)[name].nunique()
def _nunique_series_chunk(df, index):
assert isinstance(df, pd.Series)
if isinstance(index, np.ndarray):
assert len(index) == len(df)
index = pd.Series(index, index=df.index)
grouped = pd.concat([df, index], axis=1).drop_duplicates()
return grouped
def _nunique_series_combine(df):
return df.drop_duplicates()
def _nunique_series_aggregate(df):
return df.groupby(df.columns[1])[df.columns[0]].nunique()
###############################################################
# Aggregate support
#
# Aggregate is implemented as:
#
# 1. group-by-aggregate all partitions into intermediate values
# 2. collect all partitions into a single partition
# 3. group-by-aggregate the result into intermediate values
# 4. transform all intermediate values into the result
#
# In Step 1 and 3 the dataframe is grouped on the same columns.
#
###############################################################
def _make_agg_id(func, column):
return '{!s}-{!s}-{}'.format(func, column, tokenize(func, column))
def _normalize_spec(spec, non_group_columns):
"""
Return a list of ``(result_column, func, input_column)`` tuples.
Spec can be
- a function
- a list of functions
- a dictionary that maps input-columns to functions
- a dictionary that maps input-columns to a lists of functions
- a dictionary that maps input-columns to a dictionaries that map
output-columns to functions.
The non-group columns are a list of all column names that are not used in
the groupby operation.
Usually, the result columns are multi-level names, returned as tuples.
If only a single function is supplied, or a dictionary mapping columns
to single functions, simple names are returned as strings (see the first
two examples below).
Examples
--------
>>> _normalize_spec('mean', ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'count', 'b')]
>>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \
(('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \
(('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \
(('b', 'count'), 'count', 'b')]
>>> spec = collections.OrderedDict()
>>> spec['a'] = ['mean', 'size']
>>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \
(('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]
"""
if not isinstance(spec, dict):
spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))
res = []
if isinstance(spec, dict):
for input_column, subspec in spec.items():
if isinstance(subspec, dict):
res.extend(((input_column, result_column), func, input_column)
for result_column, func in subspec.items())
else:
if not isinstance(subspec, list):
subspec = [subspec]
res.extend(((input_column, funcname(func)), func, input_column)
for func in subspec)
else:
raise ValueError("unsupported agg spec of type {}".format(type(spec)))
compounds = (list, tuple, dict)
use_flat_columns = not any(isinstance(subspec, compounds)
for subspec in spec.values())
if use_flat_columns:
res = [(input_col, func, input_col) for (_, func, input_col) in res]
return res
def _build_agg_args(spec):
"""
Create transformation functions for a normalized aggregate spec.
Parameters
----------
spec: a list of (result-column, aggregation-function, input-column) triples.
To work with all argument forms understood by pandas use
``_normalize_spec`` to normalize the argument before passing it on to
``_build_agg_args``.
Returns
-------
chunk_funcs: a list of (intermediate-column, function, keyword) triples
that are applied on grouped chunks of the initial dataframe.
agg_funcs: a list of (intermediate-column, function, keyword) triples that
are applied on the grouped concatenation of the preprocessed chunks.
finalizers: a list of (result-column, function, keyword) triples that are
applied after the ``agg_funcs``. They are used to create final results
from intermediate representations.
"""
known_np_funcs = {np.min: 'min', np.max: 'max'}
chunks = {}
aggs = {}
finalizers = []
for (result_column, func, input_column) in spec:
func = funcname(known_np_funcs.get(func, func))
impls = _build_agg_args_single(result_column, func, input_column)
# overwrite existing result-columns, generate intermediates only once
chunks.update((spec[0], spec) for spec in impls['chunk_funcs'])
aggs.update((spec[0], spec) for spec in impls['aggregate_funcs'])
finalizers.append(impls['finalizer'])
chunks = sorted(chunks.values())
aggs = sorted(aggs.values())
return chunks, aggs, finalizers
def _build_agg_args_single(result_column, func, input_column):
simple_impl = {
'sum': (M.sum, M.sum),
'min': (M.min, M.min),
'max': (M.max, M.max),
'count': (M.count, M.sum),
'size': (M.size, M.sum),
}
if func in simple_impl.keys():
return _build_agg_args_simple(result_column, func, input_column,
simple_impl[func])
elif func == 'var':
return _build_agg_args_var(result_column, func, input_column)
elif func == 'std':
return _build_agg_args_std(result_column, func, input_column)
elif func == 'mean':
return _build_agg_args_mean(result_column, func, input_column)
else:
raise ValueError("unknown aggregate {}".format(func))
def _build_agg_args_simple(result_column, func, input_column, impl_pair):
intermediate = _make_agg_id(func, input_column)
chunk_impl, agg_impl = impl_pair
return dict(
chunk_funcs=[(intermediate, _apply_func_to_column,
dict(column=input_column, func=chunk_impl))],
aggregate_funcs=[(intermediate, _apply_func_to_column,
dict(column=intermediate, func=agg_impl))],
finalizer=(result_column, operator.itemgetter(intermediate), dict()),
)
def _build_agg_args_var(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_sum2 = _make_agg_id('sum2', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
(int_sum2, _compute_sum_of_squares,
dict(column=input_column)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count, int_sum2)
],
finalizer=(result_column, _finalize_var,
dict(sum_column=int_sum, count_column=int_count,
sum2_column=int_sum2)),
)
def _build_agg_args_std(result_column, func, input_column):
impls = _build_agg_args_var(result_column, func, input_column)
result_column, _, kwargs = impls['finalizer']
impls['finalizer'] = (result_column, _finalize_std, kwargs)
return impls
def _build_agg_args_mean(result_column, func, input_column):
int_sum = _make_agg_id('sum', input_column)
int_count = _make_agg_id('count', input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column,
dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column,
dict(column=input_column, func=M.count)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count)
],
finalizer=(result_column, _finalize_mean,
dict(sum_column=int_sum, count_column=int_count)),
)
def _groupby_apply_funcs(df, *index, **kwargs):
"""
Group a dataframe and apply multiple aggregation functions.
Parameters
----------
df: pandas.DataFrame
The dataframe to work on.
index: list of groupers
If given, they are added to the keyword arguments as the ``by``
argument.
funcs: list of (result-column, function, keyword-argument) triples
The list of functions that are applied on the grouped data frame.
Has to be passed as a keyword argument.
kwargs:
All keyword arguments except ``funcs`` are passed verbatim to the groupby
operation of the dataframe.
Returns
-------
aggregated:
the aggregated dataframe.
"""
if len(index):
kwargs.update(by=list(index))
funcs = kwargs.pop('funcs')
grouped = df.groupby(**kwargs)
result = collections.OrderedDict()
for result_column, func, func_kwargs in funcs:
result[result_column] = func(grouped, **func_kwargs)
return pd.DataFrame(result)
def _compute_sum_of_squares(grouped, column):
base = grouped[column] if column is not None else grouped
return base.apply(lambda x: (x ** 2).sum())
def _agg_finalize(df, funcs):
result = collections.OrderedDict()
for result_column, func, kwargs in funcs:
result[result_column] = func(df, **kwargs)
return pd.DataFrame(result)
def _apply_func_to_column(df_like, column, func):
if column is None:
return func(df_like)
return func(df_like[column])
def _finalize_mean(df, sum_column, count_column):
return df[sum_column] / df[count_column]
def _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):
n = df[count_column]
x = df[sum_column]
x2 = df[sum2_column]
result = x2 - x ** 2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
return result
def _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):
result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)
return np.sqrt(result)
def _normalize_index(df, index):
if not isinstance(df, DataFrame):
return index
elif isinstance(index, list):
return [_normalize_index(df, col) for col in index]
elif (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
return index.name
elif (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
return list(index.columns)
else:
return index
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
index: str, list or Series
The key for grouping
kwargs: dict
Other keywords passed to groupby
"""
def __init__(self, df, index=None, slice=None, **kwargs):
assert isinstance(df, (DataFrame, Series))
self.obj = df
# grouping key passed via groupby method
self.index = _normalize_index(df, index)
# slicing key applied to _GroupBy instance
self._slice = slice
self.kwargs = kwargs
if isinstance(index, Series) and df.divisions != index.divisions:
msg = ("The Series and index of the groupby"
" must have the same divisions.")
raise NotImplementedError(msg)
if self._is_grouped_by_sliced_column(self.obj, index):
# check whether the given Series is taken from the given df and unchanged.
# If any operations are performed, _name will be changed to
# e.g. "elemwise-xxxx".
# If the group key (index) is a Series sliced from the DataFrame,
# the metadata emulation must group by that same column;
# otherwise, the group key is regarded as a separate column.
self._meta = self.obj._meta.groupby(self.obj._meta[index.name])
elif isinstance(self.index, Series):
self._meta = self.obj._meta.groupby(self.index._meta)
else:
self._meta = self.obj._meta.groupby(self.index)
def _is_grouped_by_sliced_column(self, df, index):
"""
Return whether index is a Series sliced from df
"""
if isinstance(df, Series):
return False
if (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
return True
if (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
return True
return False
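# A hedged illustration of the intended behaviour (hypothetical frame
# ``ddf`` with a column 'a'); compare ``_normalize_index`` above:
#
#   self._is_grouped_by_sliced_column(ddf, ddf['a'])      # unchanged slice -> True
#   self._is_grouped_by_sliced_column(ddf, ddf['a'] + 1)  # derived series -> False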
@property
def _meta_nonempty(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.
"""
sample = self.obj._meta_nonempty
if isinstance(self.index, Series):
if self._is_grouped_by_sliced_column(self.obj, self.index):
grouped = sample.groupby(sample[self.index.name])
else:
grouped = sample.groupby(self.index._meta_nonempty)
else:
grouped = sample.groupby(self.index)
return _maybe_slice(grouped, self._slice)
def _aca_agg(self, token, func, aggfunc=None, split_every=None):
if aggfunc is None:
aggfunc = func
meta = func(self._meta)
columns = meta.name if isinstance(meta, pd.Series) else meta.columns
token = self._token_prefix + token
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
return aca([self.obj, self.index, func, columns],
chunk=_apply_chunk, aggregate=_groupby_aggregate,
meta=meta, token=token, split_every=split_every,
aggregate_kwargs=dict(aggfunc=aggfunc, levels=levels))
@derived_from(pd.core.groupby.GroupBy)
def sum(self, split_every=None):
return self._aca_agg(token='sum', func=M.sum, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def min(self, split_every=None):
return self._aca_agg(token='min', func=M.min, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def max(self, split_every=None):
return self._aca_agg(token='max', func=M.max, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def count(self, split_every=None):
return self._aca_agg(token='count', func=M.count,
aggfunc=M.sum, split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def mean(self, split_every=None):
return self.sum(split_every=split_every) / self.count(split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def size(self, split_every=None):
return self._aca_agg(token='size', func=M.size, aggfunc=M.sum,
split_every=split_every)
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1, split_every=None):
result = aca([self.obj, self.index], chunk=_var_chunk,
aggregate=_var_agg, combine=_var_combine,
token=self._token_prefix + 'var',
aggregate_kwargs={'ddof': ddof}, split_every=split_every)
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1, split_every=None):
v = self.var(ddof, split_every=split_every)
result = map_partitions(np.sqrt, v, meta=v)
return result
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
meta = self._meta.obj
if isinstance(meta, pd.DataFrame) and self._slice is not None:
meta = meta[self._slice]
columns = meta.columns if isinstance(meta, pd.DataFrame) else meta.name
return map_partitions(_groupby_get_group, self.obj, self.index, key,
columns, meta=meta, token=token)
def aggregate(self, arg, split_every):
if isinstance(self.obj, DataFrame):
if isinstance(self.index, tuple) or np.isscalar(self.index):
group_columns = {self.index}
elif isinstance(self.index, list):
group_columns = {i for i in self.index
if isinstance(i, tuple) or np.isscalar(i)}
else:
group_columns = set()
# NOTE: this step relies on the index normalization to replace
# series with their name in an index.
non_group_columns = [col for col in self.obj.columns
if col not in group_columns]
spec = _normalize_spec(arg, non_group_columns)
elif isinstance(self.obj, Series):
# implementation detail: if self.obj is a series, a pseudo column
# None is used to denote the series itself. This pseudo column is
# removed from the result columns before passing the spec along.
spec = _normalize_spec({None: arg}, [])
spec = [(result_column, func, input_column)
for ((_, result_column), func, input_column) in spec]
else:
raise ValueError("aggregate on unknown object {}".format(self.obj))
chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
# apply the transformations to determine the meta object
meta_groupby = pd.Series([], dtype=bool, index=self.obj._meta.index)
meta_stage1 = _groupby_apply_funcs(self.obj._meta, funcs=chunk_funcs,
by=meta_groupby)
meta_stage2 = _groupby_apply_funcs(meta_stage1, funcs=aggregate_funcs,
level=0)
meta = _agg_finalize(meta_stage2, finalizers)
if not isinstance(self.index, list):
chunk_args = [self.obj, self.index]
else:
chunk_args = [self.obj] + self.index
obj = aca(chunk_args,
chunk=_groupby_apply_funcs,
chunk_kwargs=dict(funcs=chunk_funcs),
aggregate=_groupby_apply_funcs,
aggregate_kwargs=dict(funcs=aggregate_funcs, level=levels),
combine=_groupby_apply_funcs,
combine_kwargs=dict(funcs=aggregate_funcs, level=levels),
meta=meta, token='aggregate', split_every=split_every)
return map_partitions(_agg_finalize, obj, meta=meta,
token='aggregate-finalize', funcs=finalizers)
@insert_meta_param_description(pad=12)
def apply(self, func, meta=no_default, columns=no_default):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. The user should provide output metadata.
2. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
func: function
Function to apply
$META
columns: list, scalar or None
Deprecated, use `meta` instead. If a list is given, the result is a
DataFrame whose columns are the specified list. Otherwise, the result is
a Series whose name is the given scalar or None (no name). If this
keyword is not given, dask tries to infer the result type from the
beginning of the data. This inference may take some time and lead to
unexpected results.
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
with raise_on_meta_error("groupby.apply({0})".format(funcname(func))):
meta = self._meta_nonempty.apply(func)
meta = make_meta(meta)
df = self.obj
if isinstance(self.index, DataFrame): # add index columns to dataframe
df2 = df.assign(**{'_index_' + c: self.index[c]
for c in self.index.columns})
index = self.index
elif isinstance(self.index, Series):
df2 = df.assign(_index=self.index)
index = self.index
else:
df2 = df
index = df[self.index]
df3 = shuffle(df2, index, **self.kwargs) # shuffle dataframe and index
if isinstance(self.index, DataFrame): # extract index from dataframe
cols = ['_index_' + c for c in self.index.columns]
index2 = df3[cols]
df4 = df3.drop(cols, axis=1, dtype=meta.columns.dtype if
isinstance(meta, pd.DataFrame) else None)
elif isinstance(self.index, Series):
index2 = df3['_index']
index2.name = self.index.name
df4 = df3.drop('_index', axis=1, dtype=meta.columns.dtype if
isinstance(meta, DataFrame) else None)
else:
df4 = df3
index2 = self.index
# Perform embarrassingly parallel groupby-apply
df5 = map_partitions(_groupby_slice_apply, df4, index2,
self._slice, func, meta=meta)
return df5
class DataFrameGroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __init__(self, df, index=None, slice=None, **kwargs):
if not kwargs.get('as_index', True):
msg = ("The keyword argument `as_index=False` is not supported in "
"dask.dataframe.groupby")
raise NotImplementedError(msg)
super(DataFrameGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
else:
g = SeriesGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
# error is raised from pandas
g._meta = g._meta[key]
return g
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.obj.columns))))
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def aggregate(self, arg, split_every=None):
if arg == 'size':
return self.size()
return super(DataFrameGroupBy, self).aggregate(arg, split_every=split_every)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def agg(self, arg, split_every=None):
return self.aggregate(arg, split_every=split_every)
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, index, slice=None, **kwargs):
# raise pandas-compat error message
if isinstance(df, Series):
# When obj is Series, index must be Series
if not isinstance(index, Series):
if isinstance(index, list):
if len(index) == 0:
raise ValueError("No group keys passed!")
msg = "Grouper for '{0}' not 1-dimensional"
raise ValueError(msg.format(index[0]))
# raise error from pandas
df._meta.groupby(index)
super(SeriesGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
def nunique(self, split_every=None):
name = self._meta.obj.name
meta = pd.Series([], dtype='int64',
index=pd.Index([], dtype=self._meta.obj.dtype),
name=name)
if isinstance(self.obj, DataFrame):
return aca([self.obj, self.index],
chunk=_nunique_df_chunk,
aggregate=_nunique_df_aggregate,
combine=_nunique_df_combine,
meta=meta, token='series-groupby-nunique',
aggregate_kwargs={'name': name},
split_every=split_every)
else:
return aca([self.obj, self.index],
chunk=_nunique_series_chunk,
aggregate=_nunique_series_aggregate,
combine=_nunique_series_combine,
meta=meta, token='series-groupby-nunique',
split_every=split_every)
@derived_from(pd.core.groupby.SeriesGroupBy)
def aggregate(self, arg, split_every=None):
# short-circuit 'simple' aggregations
if (
not isinstance(arg, (list, dict)) and
arg in {'sum', 'mean', 'var', 'size', 'std', 'count'}
):
return getattr(self, arg)(split_every=split_every)
return super(SeriesGroupBy, self).aggregate(arg, split_every=split_every)
@derived_from(pd.core.groupby.SeriesGroupBy)
def agg(self, arg, split_every=None):
return self.aggregate(arg, split_every=split_every)
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/groupby.py",
"copies": "1",
"size": "30768",
"license": "mit",
"hash": 2197407534348827400,
"line_mean": 34.3249138921,
"line_max": 95,
"alpha_frac": 0.5796281851,
"autogenerated": false,
"ratio": 3.8956697898202077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49752979749202075,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import collections
import numpy as np
from addie.processing.mantid.master_table.import_from_database.gui_handler import FilterTableHandler, FilterResultTableHandler
class ApplyRuleHandler:
def __init__(self, parent=None):
self.parent = parent
def apply_global_rule(self):
self.retrieve_list_of_rows_for_each_rule()
self.apply_inner_rules()
self.apply_outer_rules()
self.update_tableWidget_filter_result()
def update_tableWidget_filter_result(self):
list_of_rows_to_show = self.parent.list_of_rows_with_global_rule
nbr_row = self.parent.ui.tableWidget_filter_result.rowCount()
for _row in np.arange(nbr_row):
hide_row = True
if _row in list_of_rows_to_show:
hide_row = False
self.parent.ui.tableWidget_filter_result.setRowHidden(_row, hide_row)
def retrieve_list_of_rows_for_each_rule(self):
global_rule_dict = self.parent.global_rule_dict
for _group_key in global_rule_dict.keys():
_group = global_rule_dict[_group_key]
list_of_rules_for_this_group = _group['list_rules']
list_of_rows_for_each_rule = {} # {'0': [0,1,2,3,4], '1':[2,3,4,5] ...}
for _rule in list_of_rules_for_this_group:
list_of_rows_matching_rule = self.get_list_of_rows_matching_rule(rule_index=_rule)
list_of_rows_for_each_rule[_rule] = list_of_rows_matching_rule
global_rule_dict[_group_key]['list_of_rows'] = list_of_rows_for_each_rule
self.parent.global_rule_dict = global_rule_dict
def apply_inner_rules(self):
"""within each group, check the inner rule (and, or) and save the corresponding list of rows that
follow that rule"""
global_rule_dict = self.parent.global_rule_dict
for _group_key in global_rule_dict.keys():
_group = global_rule_dict[_group_key]
inner_rule = _group['inner_rule']
is_first_list_of_rows = True
for _rule_key in _group['list_of_rows'].keys():
if is_first_list_of_rows:
list_of_rows_with_inner_rule = set(_group['list_of_rows'][_rule_key])
is_first_list_of_rows = False
else:
new_list_of_rows = set(_group['list_of_rows'][_rule_key])
if inner_rule == 'and':
list_of_rows_with_inner_rule = list_of_rows_with_inner_rule & new_list_of_rows
else:
list_of_rows_with_inner_rule = list_of_rows_with_inner_rule | new_list_of_rows
global_rule_dict[_group_key]['inner_list_of_rows'] = list_of_rows_with_inner_rule
self.parent.global_rule_dict = global_rule_dict
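# A hedged illustration of the inner-rule combination above (hypothetical
# row sets): with two rules matching rows {0, 1, 2} and {1, 2, 3},
#
#   {0, 1, 2} & {1, 2, 3}   # -> {1, 2}        inner_rule == 'and'
#   {0, 1, 2} | {1, 2, 3}   # -> {0, 1, 2, 3}  inner_rule == 'or'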
def apply_outer_rules(self):
global_rule_dict = self.parent.global_rule_dict
is_first_group = True
list_of_rows_with_outer_rule = set()
for _group_key in global_rule_dict.keys():
_group = global_rule_dict[_group_key]
if is_first_group:
list_of_rows_with_outer_rule = _group['inner_list_of_rows']
is_first_group = False
else:
new_list_of_rows = _group['inner_list_of_rows']
outer_rule = _group['outer_rule']
if outer_rule == 'and':
list_of_rows_with_outer_rule = list_of_rows_with_outer_rule & new_list_of_rows
else:
list_of_rows_with_outer_rule = list_of_rows_with_outer_rule | new_list_of_rows
self.parent.list_of_rows_with_global_rule = list_of_rows_with_outer_rule
def get_list_of_rows_matching_rule(self, rule_index=-1):
"""This method will retrieve the rule definition, for example
item: sample formula
logic: is
text: Si
meaning that the Sample formula must be Si to accept this row
"""
table_handler = FilterTableHandler(table_ui=self.parent.ui.tableWidget)
row = table_handler.return_first_row_for_this_item_value(string_to_find=str(rule_index),
column_to_look_for=1)
keyword_name = table_handler.get_keyword_name(row=row)
criteria = table_handler.get_criteria(row=row)
string_to_find = table_handler.get_string_to_look_for(row=row)
result_table_handler = FilterResultTableHandler(table_ui=self.parent.ui.tableWidget_filter_result)
column_where_to_look_for = result_table_handler.get_column_of_given_keyword(keyword=keyword_name)
list_of_rows = result_table_handler.get_rows_of_matching_string(column_to_look_for=column_where_to_look_for,
string_to_find=string_to_find,
criteria=criteria)
return list_of_rows
def change_rule(self, is_added=False, is_removed=False, row=-1):
"""when user adds or removes a rule (criteria), we need to update the global rule dictionary"""
if is_added:
_row_rule_dict = {}
if self.parent.global_rule_dict == {}:
# first rule ever added: create group "0" to hold it
_row_rule_dict['group_name'] = "0"
_row_rule_dict['list_rules'] = ['0']
_row_rule_dict['inner_rule'] = 'and'
_row_rule_dict['outer_rule'] = None
self.parent.global_rule_dict = collections.OrderedDict()
self.parent.global_rule_dict['0'] = _row_rule_dict
else:
# not the first time adding a rule
# add a group of just this new rule
name_of_new_rule = str(self.parent.ui.tableWidget.item(row, 1).text())
name_of_group = self.get_name_of_group()
_row_rule_dict['group_name'] = name_of_group
_row_rule_dict['list_rules'] = [name_of_new_rule]
_row_rule_dict['inner_rule'] = 'and'
_row_rule_dict['outer_rule'] = 'and'
self.parent.global_rule_dict[name_of_group] = _row_rule_dict
else:
# remove the rule from all the groups
name_of_rule_to_remove = str(self.parent.ui.tableWidget.item(row, 1).text())
self.remove_rule_from_global_rule_dict(name_of_rule_to_remove=name_of_rule_to_remove)
def remove_rule_from_global_rule_dict(self, name_of_rule_to_remove=None):
global_rule_dict = self.parent.global_rule_dict
for _key in list(global_rule_dict.keys()):
_list_of_rule = global_rule_dict[_key]['list_rules']
new_list_of_rules = [_rule for _rule in _list_of_rule if _rule != name_of_rule_to_remove]
if new_list_of_rules == []:
_ = global_rule_dict.pop(_key, None)
else:
global_rule_dict[_key]['list_rules'] = new_list_of_rules
self.parent.global_rule_dict = global_rule_dict
def get_name_of_group(self):
# using the current list of groups, this method returns the first index (str) available to name the new group.
global_rule_dict = self.parent.global_rule_dict
available_global_rule_index = '0'
list_of_keys = list(global_rule_dict.keys())
while True:
if available_global_rule_index in list_of_keys:
available_global_rule_index = str(int(available_global_rule_index) + 1)  # plain int(); np.int was removed from numpy
else:
return available_global_rule_index
def create_global_rule_string(self):
global_rule_string = ''
global_rule_dict = self.parent.global_rule_dict
is_first_group = True
# looping through the groups
for _group_index in global_rule_dict.keys():
# list of rules for this group
_list_rules = global_rule_dict[_group_index]['list_rules']
# adding '#' in front of each rule name for this group
_str_list_rules = ["#{}".format(_rule) for _rule in _list_rules]
# keeping record of the number of rules to see if we need or not to specify inner logic
nbr_rules = len(_list_rules)
_inner_rule = " " + global_rule_dict[_group_index]['inner_rule'] + " "
str_rule_for_this_group = _inner_rule.join(_str_list_rules)
if nbr_rules > 1:
str_rule_for_this_group = "( " + str_rule_for_this_group + " )"
if is_first_group and (str_rule_for_this_group != ""):
global_rule_string = str_rule_for_this_group
is_first_group = False
elif str_rule_for_this_group != "":
_outer_logic = global_rule_dict[_group_index]['outer_rule']
global_rule_string = "{} {} {}".format(global_rule_string, _outer_logic, str_rule_for_this_group)
return global_rule_string
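# --- Hedged illustration (not part of the original module) -------------------
# apply_inner_rules()/apply_outer_rules() above reduce everything to plain set
# algebra: an 'and' rule is a set intersection, an 'or' rule is a set union,
# and create_global_rule_string() renders each rule as "#<rule index>".
# The row sets below are invented for the sketch.
def _demo_rule_combination():
    rows_rule_0 = {0, 1, 2, 3, 4}        # rows matching rule #0
    rows_rule_1 = {2, 3, 4, 5}           # rows matching rule #1
    rows_rule_2 = {4, 9}                 # rows matching rule #2
    inner = rows_rule_0 & rows_rule_1    # group 0 with inner rule 'and'
    outer = inner | rows_rule_2          # combined with group 1 via outer rule 'or'
    print(sorted(outer))                 # [2, 3, 4, 9]
    print("( #0 and #1 ) or #2")         # shape of the resulting global rule string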
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/apply_rule_handler.py",
"copies": "1",
"size": "9080",
"license": "mit",
"hash": -6092627300891180000,
"line_mean": 44.4,
"line_max": 126,
"alpha_frac": 0.5742290749,
"autogenerated": false,
"ratio": 3.610337972166998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4684567047066998,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import collections
from addie.processing.mantid.master_table.master_table_loader import FormatAsciiList
from addie.utilities.list_runs_parser import ListRunsParser
from addie.utilities.general import json_extractor
LIST_OF_METADATA_TO_CHECK = {"Mass Density": ["metadata", "entry", "sample", "mass_density"],
"Sample Env. Device": ["metadata", "entry", "daslogs", "bl1b:se:sampletemp",
"device_name"],
"Chemical Formula": ["metadata", "entry", "sample", "chemical_formula"],
"Geometry": ["metadata", "entry", "sample", "container_name"]}
class FormatJsonFromDatabaseToMasterTable:
# list of runs and title simply retrieved from the input json
list_of_runs = []
list_of_title = []
# list of runs and title after combining the title according to option selected
final_list_of_runs = []
final_list_of_title = []
# json
json = None # list of json returned from ONCat
reformated_json = None # dictionary where key is run number and value is the appropriate json
final_json = None
""" dictionary of runs, with title and list of json for those runs
{'1-3': {'list_of_json': [json1, json2, json3],
'title': "this is title of 1-3"},
..., }
"""
any_conflict = False
def __init__(self, parent=None):
self.parent = parent
def run(self, json=None, import_option=1):
if json is None:
return
# isolate runs and titles
self._isolate_runs_and_title(json=json)
# create new json dictionary where the key is the run number and the value is the json item
self._reformat_json(json=json)
# combine according to option selected
self._apply_loading_options(option=import_option)
# making final json
self._make_final_json()
def _isolate_runs_and_title(self, json=None):
"""isolate the list of runs and title from the list of json returned by ONCat"""
list_of_runs = []
list_of_title = []
for _entry in json:
run = str(_entry["indexed"]["run_number"])
title = str(_entry["metadata"]["entry"]["title"])
list_of_runs.append(run)
list_of_title.append(title)
self.list_of_runs = list_of_runs
self.list_of_title = list_of_title
def _apply_loading_options(self, option=1):
"""using the ascii options, create the final list of runs and titles"""
list_of_runs = self.list_of_runs
list_of_title = self.list_of_title
o_format = FormatAsciiList(list1=list_of_runs,
list2=list_of_title)
o_format.apply_option(option=option)
self.final_list_of_runs = o_format.new_list1
self.final_list_of_title = o_format.new_list2
def _reformat_json(self, json=None):
new_json = collections.OrderedDict()
for _entry in json:
run_number = _entry["indexed"]["run_number"]
new_json[str(run_number)] = _entry
self.reformated_json = new_json
@staticmethod
def check_conflict(list_json):
"""this method will check if all the metadata of interest are identical. If they are not,
the method will return False"""
o_conflict = ConflictHandler(list_json=list_json)
is_conflict = o_conflict.is_conflict
conflict = o_conflict.conflict
return [is_conflict, conflict]
def _create_resolved_conflict_dictionary(self, json=None):
mass_density = str(json_extractor(json, LIST_OF_METADATA_TO_CHECK["Mass Density"]))
sample_env_device = str(json_extractor(json, LIST_OF_METADATA_TO_CHECK["Sample Env. Device"]))
chemical_formula = str(json_extractor(json, LIST_OF_METADATA_TO_CHECK["Chemical Formula"]))
geometry = str(json_extractor(json, LIST_OF_METADATA_TO_CHECK["Geometry"]))
return {'chemical_formula': chemical_formula,
'geometry': geometry,
'mass_density': mass_density,
'sample_env_device': sample_env_device}
def _make_final_json(self):
"""if runs are group together, those runs are regroup and final list of json is created"""
json = self.reformated_json
list_of_runs = self.final_list_of_runs
list_of_title = self.final_list_of_title
final_json = {}
for _index, _combine_run in enumerate(list_of_runs):
# get discrete list of the runs to isolate their json
o_parser = ListRunsParser(current_runs=_combine_run)
discrete_list_of_runs = o_parser.list_current_runs
discrete_list_of_runs.sort() # make sure the runs are in ascending order
list_of_json_for_this_combine_run = []
for _individual_run in discrete_list_of_runs:
list_of_json_for_this_combine_run.append(json[str(_individual_run)])
final_json[_combine_run] = {}
final_json[_combine_run]['list_of_json'] = list_of_json_for_this_combine_run
final_json[_combine_run]['title'] = list_of_title[_index]
[is_conflict, conflict] = \
FormatJsonFromDatabaseToMasterTable.check_conflict(list_of_json_for_this_combine_run)
final_json[_combine_run]['any_conflict'] = is_conflict
final_json[_combine_run]['conflict_dict'] = conflict
if is_conflict:
self.any_conflict = True
else:
# put inside a "resolved_conflict" key the result of the none conflicts values
resolved_conflict = self._create_resolved_conflict_dictionary(json=list_of_json_for_this_combine_run[0])
final_json[_combine_run]['resolved_conflict'] = resolved_conflict
# final_json = {'1,2,5-10': {'list_of_json': [json1, json2, json5, json6, json7, ... json10],
# 'title': "title_1_1,2,5-10'},
# '20-30': {'list_of_json': [...',
# 'title': "title_20-30"},
# .... }
self.final_json = final_json
class ConflictHandler:
# LIST_OF_METADATA_TO_CHECK = {"Mass Density": ["metadata", "entry", "sample", "mass_density"],
# "Sample Env. Device": ["metadata", "entry", "daslogs", "bl1b:se:sampletemp",
# "device_name"],
# "Chemical Formula": ["metadata", "entry", "sample", "chemical_formula"],
# "Geometry": ["metadata", "entry", "sample", "container_name"]}
run_number_path = ["indexed", "run_number"]
is_conflict = False # inform if there is a conflict or not
conflict = {}
""" this dictionary will inform of the conflict as defined here
# {"1,3-5": {'Sample Env. Device" : "N/A",
# 'Geometry": "N/A"},
# "2": {"Sample Env. Device" : "yoyou",
# "Geometry": "yaha"} }
"""
def __init__(self, list_json=None):
self.check(list_json)
def check(self, list_json):
"""Check the conflict by creating a master dictionary defined as followed
master_dict = {"0": {"Run Number": ["123", "124"],
"Sample Env. Device": "device 1",
"Geometry": "geometry 1",
... },
"1": {"run Number": ["125"],
"Sample Env. Device": "device 2",
"Geometry": "geometry 1",
... },
}
"""
master_dict = {}
master_key = 0
for _json in list_json:
run_number = str(json_extractor(_json, self.run_number_path))
mass_density = str(json_extractor(_json, LIST_OF_METADATA_TO_CHECK["Mass Density"]))
sample_env_device = str(json_extractor(_json, LIST_OF_METADATA_TO_CHECK["Sample Env. Device"]))
chemical_formula = str(json_extractor(_json, LIST_OF_METADATA_TO_CHECK["Chemical Formula"]))
geometry = str(json_extractor(_json, LIST_OF_METADATA_TO_CHECK["Geometry"]))
if master_dict == {}:
master_dict[master_key] = {"Run Number": [run_number],
"sample_env_device": sample_env_device,
"mass_density": mass_density,
"chemical_formula": chemical_formula,
"geometry": geometry}
else:
for _key in master_dict.keys():
if (mass_density == master_dict[_key]["mass_density"]) and \
(sample_env_device == master_dict[_key]["sample_env_device"]) and \
(chemical_formula == master_dict[_key]["chemical_formula"]) and \
(geometry == master_dict[_key]["geometry"]):
master_dict[_key]["Run Number"].append(run_number)
break
else:
# we found a conflict
master_key += 1
master_dict[master_key] = {"Run Number": [run_number],
"sample_env_device": sample_env_device,
"mass_density": mass_density,
"chemical_formula": chemical_formula,
"geometry": geometry}
self.is_conflict = True
self.conflict = master_dict
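# --- Hedged illustration (not part of the original module) -------------------
# ConflictHandler.check() above groups runs whose metadata values all match;
# a second group appearing in master_dict is what flags a conflict. The
# stand-alone sketch below reproduces that grouping idea with plain dicts
# (the run data is invented).
def _demo_conflict_grouping():
    runs = [{"run": "123", "chemical_formula": "Si", "geometry": "PAC06"},
            {"run": "124", "chemical_formula": "Si", "geometry": "PAC06"},
            {"run": "125", "chemical_formula": "Si", "geometry": "PAC08"}]
    groups = {}
    for entry in runs:
        key = (entry["chemical_formula"], entry["geometry"])
        groups.setdefault(key, []).append(entry["run"])
    is_conflict = len(groups) > 1
    print(is_conflict)   # True: runs 123/124 agree, run 125 differs on geometry
    return is_conflict, groups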
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/format_json_from_database_to_master_table.py",
"copies": "1",
"size": "9993",
"license": "mit",
"hash": 6849246698823356000,
"line_mean": 42.6375545852,
"line_max": 120,
"alpha_frac": 0.5355749024,
"autogenerated": false,
"ratio": 4.149916943521594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5185491845921595,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import collections
try:  # Python 3.3+
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2 fallback
    from collections import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
def copy(self):
return CaseInsensitiveDict(self._store.values())
def lower_items(self):
return ((k, v[1]) for k, v in self._store.items())
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
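# --- Hedged usage sketch (not part of the original module) -------------------
# Keys are looked up case-insensitively while the casing supplied first is
# preserved for iteration and repr.
def _demo_case_insensitive_dict():
    headers = CaseInsensitiveDict({'Content-Type': 'application/json'})
    assert headers['content-type'] == 'application/json'
    assert list(headers) == ['Content-Type']   # original casing kept
    headers['ACCEPT'] = 'text/plain'
    assert headers['Accept'] == 'text/plain'
    return headers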
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "symantecssl/datastructures.py",
"copies": "1",
"size": "1332",
"license": "apache-2.0",
"hash": 8014792675471816000,
"line_mean": 27.9565217391,
"line_max": 75,
"alpha_frac": 0.5870870871,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
from __future__ import absolute_import, division, print_function
import colorsys
from fractions import Fraction
import numpy as np
import pylab
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from smartFormat import smartFormat, TeX
# onpick_peakfind() below calls interp1d without importing it in the original
# source; presumably scipy.interpolate.interp1d is what was intended.
from scipy.interpolate import interp1d
__all__ = [
"MARKERS",
"DARK_BLUE",
"DARK_RED",
"LIGHT_BLUE",
"LIGHT_RED",
"colorCycleOrthog",
"invertColor",
"hsvaFact",
"colorCycleRainbow",
"human_safe",
"my_rainbow",
"grayify_cmap",
"show_colormap",
"plotDefaults",
"generateColorCycle",
"rugplot",
"onpick_peakfind",
"onpickLegend_toggle",
"complexPlot",
"plotMatrix",
"removeBorder",
"findRenderer",
"ScaledMaxNLocator",
"logticks_format",
"smartticks_format",
"fmtstr_format",
"fractticks_format",
"maskZeros",
]
MARKERS = [
".",
"v",
"o",
"*",
"+",
"D",
"^",
"s",
"p",
"x",
"<",
">",
"h",
"H",
"d",
"|",
"_",
]
DARK_BLUE = (0.0, 0.0, 0.7)
DARK_RED = (0.7, 0.0, 0.0)
LIGHT_BLUE = (0.4, 0.4, 0.8)
LIGHT_RED = (0.8, 0.4, 0.4)
"""
Use the following as:
#mpl.rc('axes', color_cycle=colorCycleOrthog)
source: http://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors
... but modified somewhat from that!
"""
colorCycleOrthog = (
"#000000", # 0 Black
"#803E75", # 2 Strong Purple
"#FF6800", # 3 Vivid Orange
"#8A9DD7", # 4 Very Light Blue
"#FFB300", # 1 Vivid Yellow
"#C10020", # 5 Vivid Red
"#CEA262", # 6 Grayish Yellow
"#817066", # 7 Medium Gray
# The following will not be good for people with defective color vision
"#007D34", # 8 Vivid Green
"#F6768E", # 9 Strong Purplish Pink
"#00538A", # 10 Strong Blue
"#93AA00", # 11 Vivid Yellowish Green
"#593315", # 12 Deep Yellowish Brown
"#F14AD3", # 13 PINK/Magenta! (used to be: #F13A13, Vivid Reddish Orange
"#53377A", # 14 Strong Violet
"#FF8E00", # 15 Vivid Orange Yellow
"#54BF00", # 16 Vivid Greenish Yellow
"#0000A5", # 17 BLUE!
"#7F180D", # 18 Strong Reddish Brown
#'#F13A13', # 13 Vivid Reddish Orange
#'#B32851', # 16 Strong Purplish Red
#'#FF7A5C', # 19 Strong Yellowish Pink
)
def invertColor(c):
r, g, b, a = mpl.colors.colorConverter.to_rgba(c)
if len(c) == 3:
return (1 - r, 1 - g, 1 - b)
return (1 - r, 1 - g, 1 - b, a)
# if isinstance(c, basestring):
# c = c.replace('#', '')
# r, g, b = (int(c[2*i:2*i+2], 16) for i in range(3))
# ri = 255-r
# gi = 255-g
# bi = 255-b
# return '#%02x%02x%02x'%(ri, gi, bi)
def hsvaFact(c, hf=1.0, sf=1.0, vf=1.0, af=1.0, clip=True):
r, g, b, a = mpl.colors.colorConverter.to_rgba(c)
h, s, v = colorsys.rgb_to_hsv(r, g, b)
ri, gi, bi = colorsys.hsv_to_rgb(h * hf, s * sf, v * vf)
if clip:
# Clip all values to range [0, 1]
result = (
np.clip(ri, 0, 1),
np.clip(gi, 0, 1),
np.clip(bi, 0, 1),
np.clip(a * af, 0, 1),
)
else:
# Rescale to fit largest within [0, 1]; if all of r, g, b fit in this
# range, do nothing
maxval = max(ri, gi, bi)
# Scale colors if one exceeds range
if maxval > 1:
ri /= maxval
gi /= maxval
bi /= maxval
# Clip alpha to range [0, 1]
alpha = np.clip(a * af, a_min=0, a_max=1)
result = (ri, gi, bi, alpha)
return result
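# Hedged usage sketch (not part of the original module): dim DARK_RED, defined
# above, to half its brightness and make it 50% transparent.
def _demo_hsvaFact():
    dimmed_red = hsvaFact(DARK_RED, vf=0.5, af=0.5)
    print(dimmed_red)   # approximately (0.35, 0.0, 0.0, 0.5)
    return dimmed_red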
colorCycleRainbow = (
"#FF1008",
"#FF5C2F",
"#FFA055",
"#DED579",
"#ACF59A",
"#7AFFB7",
"#48F1D0",
"#17CBE4",
"#1C93F3",
"#4E4DFC",
"#8000FF",
)
human_safe = ListedColormap(colorCycleOrthog, name="human_safe")
my_rainbow = ListedColormap(colorCycleRainbow, name="my_rainbow")
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap
From: https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
if isinstance(cmap, LinearSegmentedColormap):
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
elif isinstance(cmap, ListedColormap):
return ListedColormap(colors=colors, name=cmap.name + "_grayscale")
def show_colormap(cmap):
"""From: https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/"""
im = np.outer(np.ones(100), np.arange(1000))
fig, ax = plt.subplots(2, figsize=(6, 1.5), subplot_kw=dict(xticks=[], yticks=[]))
fig.subplots_adjust(hspace=0.1)
ax[0].imshow(im, cmap=cmap)
ax[1].imshow(im, cmap=grayify_cmap(cmap))
def plotColorCycle(color_cycle=colorCycleOrthog):
N = len(color_cycle)
x = np.linspace(0, 2 * np.pi, 100)
f = plt.figure(333)
plt.clf()
ax = f.add_subplot(111)
for n in range(N):
ax.plot(
x,
np.cos(x - 2 * np.pi / N * n),
lw=3,
label=format(n, "2d") + ": " + color_cycle[n][1:],
color=color_cycle[n],
)
plt.legend(loc="center right")
ax.set_xlim([0, 8.2])
ax.set_ylim([-1.1, 1.1])
plt.tight_layout()
def plotDefaults():
plt.ion()
mpl.rc("font", **{"family": "serif", "weight": "normal", "size": 16})
mpl.rc("axes", color_cycle=human_safe.colors)
# generateColorCycle(n_colors=6)
def generateColorCycle(cmap=mpl.cm.brg, n_colors=8, set_it=True):
cmap_indices = np.array(
np.round(np.arange(0, n_colors) * (cmap.N - 1) / (n_colors - 1)), dtype=int
)
color_cycle = [
"#%0.2X%0.2X%0.2X" % tuple(np.round(c[0:3] * 255)) for c in cmap(cmap_indices)
]
if set_it:
mpl.rc("axes", color_cycle=color_cycle)
return color_cycle
def rugplot(a, y0, dy, ax, **kwargs):
return ax.plot([a, a], [y0, y0 + dy], **kwargs)
def onpick_peakfind(event):
"""Use this by:
>> fig = figure(1)
>> ax = axis(111)
>> line, = ax.plot(x, y, picker=5)
>> fig.canvas.mpl_connect('pick_event', onpick_peakfind)
"""
print(event, event.canvas)
thisline = event.artist
vis = thisline.get_visible()
# -- This function doesn't handle the lines in the legend
# fig = event.canvas.figure
# leg = fig.
# print leg.__dict__
# for child in leg.get_children():
# print "child:", child
# if thisline in leg.get_lines():
# return
# -- If the line has been made invisible, ignore it (return from function)
if not vis:
return
c = thisline.get_color()
ls = thisline.get_linestyle()
lw = thisline.get_linewidth()
mk = thisline.get_marker()
mkec = thisline.get_markeredgecolor()
mkew = thisline.get_markeredgewidth()
mkfc = thisline.get_markerfacecolor()
mksz = thisline.get_markersize()
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
label = thisline.get_label()
freqrangeind = event.ind
# print 'onpick points:', zip(xdata[ind], ydata[ind])
# print freqrangeind
# print ""
# print ydata[freqrangeind]
minvalind = np.argmin(ydata[freqrangeind])
maxvalind = np.argmax(ydata[freqrangeind])
minval = ydata[freqrangeind][minvalind]
minx = xdata[freqrangeind][minvalind]
maxval = ydata[freqrangeind][maxvalind]
maxx = xdata[freqrangeind][maxvalind]
print("")
print(label)
print("min:", minval, "at", minx, "max:", maxval, "at", maxx)
halfInd = -1
maxInd = -1
try:
maxInd = pylab.find(ydata[freqrangeind] == maxval)
maxInd = maxInd[0]
# print maxInd
maxInd = freqrangeind[0] + maxInd
halfPower = maxval - 10 * np.log10(2)
# quarterind = find(ydata[freqrangeind] < maxval-10*np.log10(4))
halfInd = pylab.find(ydata < halfPower)
inddiff = halfInd - maxInd
upperInd = min(halfInd[pylab.find(inddiff > 0)])
lowerInd = max(halfInd[pylab.find(inddiff < 0)])
# print lowerInd, maxInd, upperInd
yLower = ydata[lowerInd : maxInd + 1]
xLower = xdata[lowerInd : maxInd + 1]
dyLower = max(yLower) - min(yLower)
yUpper = ydata[maxInd : upperInd + 1]
xUpper = xdata[maxInd : upperInd + 1]
dyUpper = max(yUpper) - min(yUpper)
plt.figure(999)
plt.clf()
# print ls, lw, mk, mkfc, mksz
# print l
# print l.get_markerfacecolor()
# print l.get_color()
# l.set_color(c)
# l.set_linestyle(ls)
# l.set_linewidth(lw)
# l.set_marker(mk)
# l.set_markeredgecolor(mkec)
# l.set_markeredgewidth(mkew)
# l.set_markerfacecolor(mkfc)
# l.set_markersize(mksz)
peakPlotTitle = plt.title(label, fontsize=14)
interpKind = "linear"
interpLower = interp1d(yLower, xLower, kind=interpKind)
interpUpper = interp1d(np.flipud(yUpper), np.flipud(xUpper), kind=interpKind)
lowerHalfPowerFreq = interpLower(halfPower)
upperHalfPowerFreq = interpUpper(halfPower)
iyLower = np.arange(min(yLower), max(yLower), dyLower / 40)
ixLower = interpLower(iyLower)
iyUpper = np.arange(max(yUpper), min(yUpper), -dyUpper / 40)
ixUpper = interpUpper(iyUpper)
delta_f = upperHalfPowerFreq - lowerHalfPowerFreq
f0 = xdata[maxInd]
Q = f0 / delta_f
print(
"f0:",
f0,
"delta_f:",
delta_f,
"pkval:",
ydata[maxInd],
"Q:",
Q,
"eta:",
1 / Q,
)
plt.plot(
np.concatenate((ixLower, ixUpper)),
np.concatenate((iyLower, iyUpper)),
"b.-",
alpha=0.2,
linewidth=8,
)
plt.plot(
[lowerHalfPowerFreq, upperHalfPowerFreq],
[halfPower] * 2,
"c-",
linewidth=15,
alpha=0.25,
)
l, = plt.plot(
np.concatenate((xLower, xUpper)),
np.concatenate((yLower, yUpper)),
color=c,
linestyle=ls,
linewidth=3,
marker=mk,
markerfacecolor=mkfc,
markersize=mksz,
markeredgewidth=mkew,
markeredgecolor=mkec,
)
pylab.text(
(lowerHalfPowerFreq + upperHalfPowerFreq) / 2,
halfPower,
"FWHM = "
+ lowPrec(delta_f)
+ ", Q = "
+ lowPrec(Q)
+ r", $\eta$ = "
+ lowPrec(1 / Q),
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
plt.draw()
except:
pass
# raise()
# print "failed to find/fit peak", halfInd, maxInd
def onpickLegend_toggle(event):
try:
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
f4.canvas.draw()
except:
pass
def complexPlot(
f,
data,
plot_kwargs=None,
fig_kwargs=None,
label=None,
title=None,
xlabel=None,
magPlot=True,
phasePlot=True,
realPlot=True,
imagPlot=True,
squareMag=True,
magScale="log",
phaseScale="deg",
realScale="linear",
imagScale="linear",
freqScale="log",
unwrapPhase=False,
fignum=301,
):
nPlots = magPlot + phasePlot + realPlot + imagPlot
# plt.close(fignum)
if fig_kwargs is None:
fig = plt.figure(fignum, figsize=(7, 2.00 * nPlots))
else:
fig = plt.figure(fignum, **fig_kwargs)
if plot_kwargs is None:
plot_kwargs = [{}] * nPlots
elif isinstance(plot_kwargs, dict):
plot_kwargs = [plot_kwargs] * nPlots
# -- Stack plots directly on top of one another
# plt.subplots_adjust(hspace=0.001)
# fig.clf()
plotN = 0
axesList = []
xticklabels = []
magSq = (np.abs(data)) ** 2
if magPlot:
if squareMag:
M = magSq
ylab = r"Mag$^2$"
else:
M = np.sqrt(magSq)
ylab = r"Mag"
plotN += 1
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN)
ax.plot(f, M, label=label, **kwargs)
ax.set_ylabel(ylab)
ax.grid(b=True)
ax.set_yscale(magScale)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if phasePlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
phi = np.arctan2(np.imag(data), np.real(data))
if unwrapPhase:
phi = np.unwrap(phi) # , np.pi*(1-1/10))
if phaseScale == "deg":
phaseUnits = r"deg"
phi = phi * 180 / np.pi
else:
phaseUnits = r"rad"
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, phi, label=label, **kwargs)
ax.set_ylabel(r"Phase (" + phaseUnits + r")")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if realPlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, np.real(data), label=label, **kwargs)
ax.set_ylabel("Real")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_yscale(realScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if imagPlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, np.imag(data), label=label, **kwargs)
ax.set_ylabel("Imaginary")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_yscale(imagScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
ax.set_xscale(freqScale)
if xlabel is not None:
ax.set_xlabel(xlabel)
# plt.setp(xticklabels, visible=False)
# fig.tight_layout()
return fig, axesList
def plotMatrix(tuplesDict, labelsList):
"""From:
http://fromthepantothefire.com/matplotlib/rock_paper_scissors.py"""
# list of string labels for rows/columns and
# data in dictionary of tuples of these labels (row_label, col_label)
# Map text labels to index used on plot
# this is convenient if you want to reorganize the display order
# just update the labelsList order.
labelNameToIndex = {}
for i, lab in enumerate(labelsList):
labelNameToIndex[lab] = i
# number of rows and columns
numLabels = len(labelsList)
# create a list of data points
xyz = []
for t in tuplesDict:
x = labelNameToIndex[t[1]]
# y values are reversed so output oriented the way I
# think about matrices (0, 0) in upper left.
y = numLabels - 1 - labelNameToIndex[t[0]]
# extract value and color
(z, c) = tuplesDict[t]
xyz.append((x, y, z, c))
for x, y, z, c in xyz:
plt.scatter([x], [y], s=[z], color=c, alpha=0.8)
tickLocations = list(range(numLabels))
plt.xticks(tickLocations, labelsList, rotation=90)
# reverse the labels for y axis to match the data
plt.yticks(tickLocations, labelsList[::-1])
# set the axis 1 beyond the data so it looks good.
plt.axis([-1, numLabels, -1, numLabels])
def removeBorder(axes=None, top=False, right=False, left=True, bottom=True):
"""
Minimize chartjunk by stripping out unnecessary plot borders and axis ticks
The top/right/left/bottom keywords toggle whether the corresponding plot
border is drawn
from ChrisBeaumont,
https://github.com/cs109/content/blob/master/README.md
"""
ax = axes or plt.gca()
ax.spines["top"].set_visible(top)
ax.spines["right"].set_visible(right)
ax.spines["left"].set_visible(left)
ax.spines["bottom"].set_visible(bottom)
# turn off all ticks
ax.yaxis.set_ticks_position("none")
ax.xaxis.set_ticks_position("none")
# now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
def findRenderer(fig):
"""From http://stackoverflow.com/questions/22667224/matplotlib-get-text-bounding-box-independent-of-backend"""
if hasattr(fig.canvas, "get_renderer"):
# Some backends, such as TkAgg, have the get_renderer method, which
# makes this easy.
renderer = fig.canvas.get_renderer()
else:
# Other backends do not have the get_renderer method, so we have a work
# around to find the renderer. Print the figure to a temporary file
# object, and then grab the renderer that was used.
# (I stole this trick from the matplotlib backend_bases.py
# print_figure() method.)
import io
fig.canvas.print_pdf(io.BytesIO())
renderer = fig._cachedRenderer
return renderer
class ScaledMaxNLocator(mpl.ticker.MaxNLocator):
def __init__(self, scale, *args, **kwargs):
super(ScaledMaxNLocator, self).__init__(**kwargs)
self.scale = scale
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
# print self.scale, vmin, vmax, [float(tl)/float(self.scale) for tl in self.tick_values(vmin*self.scale, vmax*self.scale)]
return [
float(tl) / float(self.scale)
for tl in self.tick_values(vmin * self.scale, vmax * self.scale)
]
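# Hedged usage sketch (not part of the original module): place ticks at
# positions that are "nice" numbers once multiplied by 1e3, e.g. for an axis
# whose data is in seconds but which a separate formatter labels in ms.
def _demo_scaled_locator():
    fig_demo, ax_demo = plt.subplots()
    ax_demo.plot([0.0, 0.25, 0.5], [0, 1, 0])
    ax_demo.xaxis.set_major_locator(ScaledMaxNLocator(1e3, nbins=5))
    return fig_demo, ax_demo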
def logticks_format(value, index):
"""
By Francesco Montesano
http://stackoverflow.com/questions/19239297/matplotlib-bad-ticks-labels-for-loglog-twin-axis
This function decompose value in base*10^{exp} and return a latex string.
If 0<=value<99: return the value as it is.
if 0.1<=value<1: returns it rounded to the first decimal
otherwise returns $base*10^{exp}$
I've designed the function to be used with values for which the decomposition
returns integers
Use as:
import matplotlib.ticker as ticker
subs = [1., 3., 6.]
ax.xaxis.set_minor_locator(ticker.LogLocator(subs=subs))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.FuncFormatter(logticks_format))
"""
exp = np.floor(np.log10(value))
base = value / 10 ** exp
if exp == 0 or exp == 1:
return "${0:d}$".format(int(value))
if exp == -1:
return "${0:.1f}$".format(value)
else:
if base == 1:
return "$10^{{{0:d}}}$".format(int(exp))
return "${0:d}\\times10^{{{1:d}}}$".format(int(base), int(exp))
def smartticks_format(**kwargs):
sfmtargs = dict(sciThresh=[4, 4], sigFigs=3, keepAllSigFigs=False)
if kwargs is not None:
sfmtargs.update(kwargs)
def smart_ticks_formatter(value, index):
return smartFormat(value, **sfmtargs)
return smart_ticks_formatter
def fmtstr_format(fmt):
def fixed_ticks_formatter(value, index):
return TeX(format(value, fmt))
return fixed_ticks_formatter
def fractticks_format(DENOM_LIMIT):
def fract_ticks_formatter(value, index):
f = Fraction(value).limit_denominator(DENOM_LIMIT)
if f.denominator == 1:
return r"$" + format(f.numerator, "d") + r"$"
return (
r"$" + format(f.numerator, "d") + r"/" + format(f.denominator, "d") + r"$"
)
return fract_ticks_formatter
def maskZeros(H):
return H == 0
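# Hedged usage sketch (not part of the original module): maskZeros() pairs
# naturally with numpy masked arrays, e.g. to keep zero bins off a log plot.
def _demo_maskZeros():
    H = np.array([[0.0, 2.0], [3.0, 0.0]])
    H_masked = np.ma.masked_where(maskZeros(H), H)
    print(H_masked)   # the zero entries show up as masked ('--')
    return H_masked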
| {
"repo_name": "jllanfranchi/pygeneric",
"path": "plotGoodies.py",
"copies": "1",
"size": "21446",
"license": "mit",
"hash": -3357342394156699000,
"line_mean": 27.7865771812,
"line_max": 130,
"alpha_frac": 0.5705959153,
"autogenerated": false,
"ratio": 3.1804834643333826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9245918944831648,
"avg_score": 0.001032086960346951,
"num_lines": 745
} |
from __future__ import absolute_import, division, print_function
import contextlib
import enum
import itertools
import logging
import threading
from six.moves import queue
import workflows
import workflows.logging
class Status(enum.Enum):
'''
Internal service status codes
---------------------------------------------
These codes will be sent to the frontend to indicate the current state of
the main loop regardless of the status text, which can be set freely by
the specific service.
'''
# The state transitions are: (see definition of CommonService.start() below)
# constructor() -> NEW
# NEW -> start() being called -> STARTING
# STARTING -> self.initializing() -> IDLE
# IDLE -> wait for messages on command queue -> PROCESSING
# \--> optionally: idle timer elapsed -> TIMER
# PROCESSING -> process command -> IDLE
# \--> shutdown command received -> SHUTDOWN
# TIMER -> process event -> IDLE
# SHUTDOWN -> self.in_shutdown() -> END
# unhandled exception -> ERROR
NEW = (0, 'constructing')
STARTING = (1, 'starting')
IDLE = (2, 'idle')
TIMER = (3, 'timer event')
PROCESSING = (4, 'processing')
SHUTDOWN = (5, 'shutting down')
END = (6, 'shutdown')
ERROR = (7, 'error')
# Extra states that are not used by services themselves but may be used
# externally:
NONE = (-1, 'no service loaded') # Node has no service instance loaded
TEARDOWN = (-2, 'shutdown') # Node is shutting down
def __init__(self, intval, description):
'''
Each status is defined as a tuple of a unique integer value and a
descriptive string. These are available via enum properties
'''
self.intval = intval
self.description = description
class Priority(enum.IntEnum):
'''
Priorities for the service-internal priority queue. This ensures that e.g.
frontend commands are always processed before timer events.
'''
COMMAND = 1
TIMER = 2
TRANSPORT = 3
IDLE = 4
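# --- Hedged illustration (not part of the original package) ------------------
# CommonService below pulls (priority, sequence, payload) tuples off a
# PriorityQueue, so a COMMAND overtakes any queued TRANSPORT messages while the
# itertools counter keeps same-priority items in insertion order.
def _demo_priority_ordering():
    demo_counter = itertools.count()
    demo_queue = queue.PriorityQueue()
    demo_queue.put((Priority.TRANSPORT, next(demo_counter), 'message 1'))
    demo_queue.put((Priority.TRANSPORT, next(demo_counter), 'message 2'))
    demo_queue.put((Priority.COMMAND, next(demo_counter), 'shutdown'))
    print([demo_queue.get()[2] for _ in range(3)])  # ['shutdown', 'message 1', 'message 2']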
class CommonService(object):
'''
Base class for workflow services. A service is a piece of software that runs
in an isolated environment, communicating only via pipes with the outside
world. Units of work are injected via a pipe. Results, status and log
messages, etc. are written out via a pipe. Any task can be encapsulated
as a service, for example a service that counts spots on an image passed
as a filename, and returns the number of counts.
To instantiate a service two Pipe-like objects should be passed to the
constructors, one to communicate from the service to the frontend, one to
communicate from the frontend to the service.
'''
# Human readable service name -----------------------------------------------
_service_name = 'unnamed service'
# Logger name ---------------------------------------------------------------
_logger_name = 'workflows.service' # The logger can be accessed via self.log
# Overrideable functions ----------------------------------------------------
def initializing(self):
'''Service initialization. This function is run before any commands are
received from the frontend. This is the place to request channel
subscriptions with the messaging layer, and register callbacks.
This function can be overridden by specific service implementations.'''
pass
def in_shutdown(self):
'''Service shutdown. This function is run before the service is terminated.
No more commands are received, but communications can still be sent.
This function can be overridden by specific service implementations.'''
pass
SERVICE_STATUS_NEW = Status.NEW.intval
SERVICE_STATUS_STARTING = Status.STARTING.intval
SERVICE_STATUS_IDLE = Status.IDLE.intval
SERVICE_STATUS_TIMER = Status.TIMER.intval
SERVICE_STATUS_PROCESSING = Status.PROCESSING.intval
SERVICE_STATUS_SHUTDOWN = Status.SHUTDOWN.intval
SERVICE_STATUS_END = Status.END.intval
SERVICE_STATUS_ERROR = Status.ERROR.intval
SERVICE_STATUS_NONE = Status.NONE.intval
SERVICE_STATUS_TEARDOWN = Status.TEARDOWN.intval
# Number to short string conversion
human_readable_state = { e.intval: e.description for e in Status }
# Default logging level for log messages from this service
log_verbosity = logging.INFO
# Any keyword arguments set on service invocation
start_kwargs = { }
# Not so overrideable functions ---------------------------------------------
def __init__(self, *args, **kwargs):
'''Service constructor. Parameters include optional references to two
pipes: frontend= for messages from the service to the frontend,
and commands= for messages from the frontend to the service.
A dictionary can optionally be passed with environment=, which is then
available to the service during runtime.'''
self.__pipe_frontend = None
self.__pipe_commands = None
self._environment = kwargs.get('environment', {})
self._transport = None
self.__callback_register = {}
self.__log_extensions = []
self.__service_status = None
self.__shutdown = False
self.__update_service_status(self.SERVICE_STATUS_NEW)
self.__queue = queue.PriorityQueue()
self._idle_callback = None
self._idle_time = None
# Logger will be overwritten in start() function
self.log = logging.getLogger(self._logger_name)
def __send_to_frontend(self, data_structure):
'''Put a message in the pipe for the frontend.'''
if self.__pipe_frontend:
self.__pipe_frontend.send(data_structure)
@property
def transport(self):
return self._transport
@transport.setter
def transport(self, value):
if self._transport:
raise AttributeError("Transport already defined")
self._transport = value
def start_transport(self):
'''If a transport object has been defined then connect it now.'''
if self.transport:
if self.transport.connect():
self.log.debug('Service successfully connected to transport layer')
else:
raise RuntimeError('Service could not connect to transport layer')
# direct all transport callbacks into the main queue
self._transport_interceptor_counter = itertools.count()
self.transport.subscription_callback_set_intercept(self._transport_interceptor)
else:
self.log.debug('No transport layer defined for service. Skipping.')
def _transport_interceptor(self, callback):
'''Takes a callback function and returns a function that takes headers and
messages and places them on the main service queue.'''
def add_item_to_queue(header, message):
queue_item = (
Priority.TRANSPORT,
next(self._transport_interceptor_counter), # insertion sequence to keep messages in order
(callback, header, message),
)
self.__queue.put(queue_item) # Block incoming transport until insertion completes
return add_item_to_queue
def connect(self, frontend=None, commands=None):
'''Inject pipes connecting the service to the frontend. Two arguments are
supported: frontend= for messages from the service to the frontend,
and commands= for messages from the frontend to the service.
The injection should happen before the service is started, otherwise the
underlying file descriptor references may not be handled correctly.'''
if frontend:
self.__pipe_frontend = frontend
self.__send_service_status_to_frontend()
if commands:
self.__pipe_commands = commands
@contextlib.contextmanager
def extend_log(self, field, value):
'''A context wherein a specified extra field in log messages is populated
with a fixed value. This affects all log messages within the context.'''
self.__log_extensions.append((field, value))
try:
yield
except Exception as e:
setattr(e, 'workflows_log_' + field, value)
raise
finally:
self.__log_extensions.remove((field, value))
def __command_queue_listener(self):
'''Function to continuously retrieve data from the frontend. Commands are
sent to the central priority queue. If the pipe from the frontend is
closed the service shutdown is initiated. Check every second if service
has shut down, then terminate.
This function is run by a separate daemon thread, which is started by
the __start_command_queue_listener function.
'''
self.log.debug("Queue listener thread started")
counter = itertools.count() # insertion sequence to keep messages in order
while not self.__shutdown:
if self.__pipe_commands.poll(1):
try:
message = self.__pipe_commands.recv()
except EOFError:
# Pipe was closed by frontend. Shut down service.
self.__shutdown = True
self.log.error("Pipe closed by frontend, shutting down service", exc_info=True)
break
queue_item = (Priority.COMMAND, next(counter), message)
try:
self.__queue.put(queue_item, True, 60)
except queue.Full:
# If the message can't be stored within 60 seconds then the service is
# operating outside normal parameters. Try to shut it down.
self.__shutdown = True
self.log.error("Write to service priority queue failed, shutting down service", exc_info=True)
break
self.log.debug("Queue listener thread terminating")
def __start_command_queue_listener(self):
'''Start the function __command_queue_listener in a separate thread. This
function continuously listens to the pipe connected to the frontend.
'''
thread_function = self.__command_queue_listener
class QueueListenerThread(threading.Thread):
def run(qltself):
thread_function()
assert not hasattr(self, '__queue_listener_thread')
self.log.debug("Starting queue listener thread")
self.__queue_listener_thread = QueueListenerThread()
self.__queue_listener_thread.daemon = True
self.__queue_listener_thread.name = "Command Queue Listener"
self.__queue_listener_thread.start()
def _log_send(self, logrecord):
'''Forward log records to the frontend.'''
for field, value in self.__log_extensions:
setattr(logrecord, field, value)
self.__send_to_frontend({
'band': 'log',
'payload': logrecord
})
def _register(self, message_band, callback):
'''Register a callback function for a specific message band.'''
self.__callback_register[message_band] = callback
def _register_idle(self, idle_time, callback):
'''Register a callback function that is run when idling for a given
time span (in seconds).'''
self._idle_callback = callback
self._idle_time = idle_time
def __update_service_status(self, statuscode):
'''Set the internal status of the service object, and notify frontend.'''
if self.__service_status != statuscode:
self.__service_status = statuscode
self.__send_service_status_to_frontend()
def __send_service_status_to_frontend(self):
'''Actually send the internal status of the service object to the frontend.'''
self.__send_to_frontend({
'band': 'status_update',
'statuscode': self.__service_status
})
def get_name(self):
'''Get the name for this service.'''
return self._service_name
def _set_name(self, name):
'''Set a new name for this service, and notify the frontend accordingly.'''
self._service_name = name
self.__send_to_frontend({ 'band': 'set_name', 'name': self._service_name })
def _shutdown(self):
'''Terminate the service.'''
self.__shutdown = True
def initialize_logging(self):
'''Reset the logging for the service process. All logged messages are
forwarded to the frontend. If any filtering is desired, then this must
take place on the service side.'''
# Reset logging to pass logrecords into the queue to the frontend only.
# Existing handlers may be broken as they were copied into a new process,
# so should be discarded.
for loggername in [None] + list(logging.Logger.manager.loggerDict.keys()):
logger = logging.getLogger(loggername)
while logger.handlers:
logger.removeHandler(logger.handlers[0])
# Re-enable logging to console
root_logger = logging.getLogger()
# By default pass all warning (and higher) level messages to the frontend
root_logger.setLevel(logging.WARN)
root_logger.addHandler(workflows.logging.CallbackHandler(self._log_send))
# Set up the service logger and pass all info (and higher) level messages
# (or other level if set differently)
self.log = logging.getLogger(self._logger_name)
if self.start_kwargs.get('verbose_log'):
self.log_verbosity = logging.DEBUG
self.log.setLevel(self.log_verbosity)
# Additionally, write all critical messages directly to console
console = logging.StreamHandler()
console.setLevel(logging.CRITICAL)
root_logger.addHandler(console)
def start(self, **kwargs):
'''Start listening to command queue, process commands in main loop,
set status, etc...
This function is most likely called by the frontend in a separate
process.'''
# Keep a copy of keyword arguments for use in subclasses
self.start_kwargs.update(kwargs)
try:
self.initialize_logging()
self.__update_service_status(self.SERVICE_STATUS_STARTING)
self.start_transport()
self.initializing()
self._register('command', self.__process_command)
if self.__pipe_commands is None:
# can only listen to commands if command queue is defined
self.__shutdown = True
else:
# start listening to command queue in separate thread
self.__start_command_queue_listener()
while not self.__shutdown: # main loop
self.__update_service_status(self.SERVICE_STATUS_IDLE)
if self._idle_time is None:
task = self.__queue.get()
else:
try:
task = self.__queue.get(True, self._idle_time)
except queue.Empty:
self.__update_service_status(self.SERVICE_STATUS_TIMER)
if self._idle_callback:
self._idle_callback()
continue
self.__update_service_status(self.SERVICE_STATUS_PROCESSING)
if task[0] == Priority.COMMAND:
message = task[2]
if message and 'band' in message:
processor = self.__callback_register.get(message['band'])
if processor is None:
self.log.warning('received message on unregistered band\n%s',
message)
else:
processor(message.get('payload'))
else:
self.log.warning('received message without band information\n%s',
message)
elif task[0] == Priority.TRANSPORT:
callback, header, message = task[2]
callback(header, message)
else:
self.log.warning('Unknown item on main service queue\n%r', task)
except KeyboardInterrupt:
self.log.warning('Ctrl+C detected. Shutting down.')
except Exception as e:
self.process_uncaught_exception(e)
self.__update_service_status(self.SERVICE_STATUS_ERROR)
self.in_shutdown()
return
try:
self.__update_service_status(self.SERVICE_STATUS_SHUTDOWN)
self.in_shutdown()
self.__update_service_status(self.SERVICE_STATUS_END)
except Exception as e:
self.process_uncaught_exception(e)
self.__update_service_status(self.SERVICE_STATUS_ERROR)
def process_uncaught_exception(self, e):
'''This is called to handle otherwise uncaught exceptions from the service.
The service will terminate either way, but here we can do things such as
gathering useful environment information and logging for posterity.'''
# Add information about the actual exception to the log message
# This includes the file, line and piece of code causing the exception.
# exc_info=True adds the full stack trace to the log message.
exc_file_fullpath, exc_file, exc_lineno, exc_func, exc_line = \
workflows.logging.get_exception_source()
added_information = {
'workflows_exc_lineno': exc_lineno,
'workflows_exc_funcName': exc_func,
'workflows_exc_line': exc_line,
'workflows_exc_pathname': exc_file_fullpath,
'workflows_exc_filename': exc_file,
}
for field in filter(lambda x: x.startswith('workflows_log_'), dir(e)):
added_information[field[14:]] = getattr(e, field, None)
self.log.critical('Unhandled service exception: %s', e, exc_info=True,
extra=added_information)
def __process_command(self, command):
'''Process an incoming command message from the frontend.'''
if command == Commands.SHUTDOWN:
self.__shutdown = True
class Commands():
'''A list of command strings used for communicating with the frontend.'''
SHUTDOWN = 'shutdown'
| {
"repo_name": "xia2/workflows",
"path": "workflows/services/common_service.py",
"copies": "1",
"size": "17096",
"license": "bsd-3-clause",
"hash": 8916683871649775000,
"line_mean": 37.5914221219,
"line_max": 104,
"alpha_frac": 0.6655357978,
"autogenerated": false,
"ratio": 4.35123441079155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012552572818460372,
"num_lines": 443
} |
from __future__ import absolute_import, division, print_function
import contextlib
import logging
import multiprocessing
import threading
import time
import traceback
import warnings
from collections import OrderedDict
try:  # Python 3.3+
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping
import numpy as np
from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import dask_array_type, iteritems
from ..core.utils import FrozenOrderedDict, NdimSizeLenMixin
# Import default lock
try:
from dask.utils import SerializableLock
HDF5_LOCK = SerializableLock()
except ImportError:
HDF5_LOCK = threading.Lock()
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = '__values__'
def get_scheduler(get=None, collection=None):
""" Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.utils.effective_get
"""
try:
from dask.utils import effective_get
actual_get = effective_get(get, collection)
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return 'distributed'
except (ImportError, AttributeError):
try:
import dask.multiprocessing
if actual_get == dask.multiprocessing.get:
return 'multiprocessing'
else:
return 'threaded'
except ImportError:
return 'threaded'
except ImportError:
return None
def get_scheduler_lock(scheduler, path_or_file=None):
""" Get the appropriate lock for a certain situation based onthe dask
scheduler used.
See Also
--------
dask.utils.get_scheduler_lock
"""
if scheduler == 'distributed':
from dask.distributed import Lock
return Lock(path_or_file)
elif scheduler == 'multiprocessing':
return multiprocessing.Lock()
elif scheduler == 'threaded':
from dask.utils import SerializableLock
return SerializableLock()
else:
return threading.Lock()
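# --- Hedged usage sketch (not part of the original module) -------------------
# Typical pairing of the two helpers above: detect the active dask scheduler,
# then pick a lock implementation that is safe for it. The filename is only an
# example.
def _demo_scheduler_lock():
    scheduler = get_scheduler()            # e.g. None, 'threaded', 'distributed'
    lock = get_scheduler_lock(scheduler, path_or_file='example.nc')
    return scheduler, lock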
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
if name == NONE_VAR_NAME:
name = None
return name
def find_root(ds):
"""
Helper function to find the root of a netcdf or h5netcdf dataset.
"""
while ds.parent is not None:
ds = ds.parent
return ds
def robust_getitem(array, key, catch=Exception, max_retries=6,
initial_delay=500):
"""
Robustly index an array, using retry logic with exponential backoff if any
of the errors ``catch`` are raised. The initial_delay is measured in ms.
With the default settings, the maximum delay will be in the range of 32-64
seconds.
"""
assert max_retries >= 0
for n in range(max_retries + 1):
try:
return array[key]
except catch:
if n == max_retries:
raise
base_delay = initial_delay * 2 ** n
next_delay = base_delay + np.random.randint(base_delay)
msg = ('getitem failed, waiting %s ms before trying again '
'(%s tries remaining). Full traceback: %s' %
(next_delay, max_retries - n, traceback.format_exc()))
logger.debug(msg)
time.sleep(1e-3 * next_delay)
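# --- Hedged illustration (not part of the original module) -------------------
# A tiny array-like whose first reads fail shows the retry-with-backoff
# behaviour of robust_getitem above (initial_delay shortened so the demo is
# quick).
def _demo_robust_getitem():
    class FlakyArray(object):
        def __init__(self, data, failures=2):
            self._data = data
            self._failures = failures
        def __getitem__(self, key):
            if self._failures > 0:
                self._failures -= 1
                raise RuntimeError('transient read error')
            return self._data[key]
    flaky = FlakyArray(np.arange(10))
    return robust_getitem(flaky, slice(0, 3), catch=RuntimeError,
                          initial_delay=1)   # -> array([0, 1, 2])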
class CombinedLock(object):
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, *args):
return all(lock.acquire(*args) for lock in self.locks)
def release(self, *args):
for lock in self.locks:
lock.release(*args)
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
@property
def locked(self):
return any(lock.locked for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
def __array__(self, dtype=None):
key = indexing.BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class AbstractDataStore(Mapping):
_autoclose = None
_ds = None
_isopen = False
def __iter__(self):
return iter(self.variables)
def __getitem__(self, key):
return self.variables[key]
def __len__(self):
return len(self.variables)
def get_dimensions(self): # pragma: no cover
raise NotImplementedError
def get_attrs(self): # pragma: no cover
raise NotImplementedError
def get_variables(self): # pragma: no cover
raise NotImplementedError
def get_encoding(self):
return {}
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example::
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
return variables, attributes
This function will be called anytime variables or attributes
are requested, so care should be taken to make sure it's fast.
"""
variables = FrozenOrderedDict((_decode_variable_name(k), v)
for k, v in self.get_variables().items())
attributes = FrozenOrderedDict(self.get_attrs())
return variables, attributes
@property
def variables(self):
warnings.warn('The ``variables`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
variables, _ = self.load()
return variables
@property
def attrs(self):
warnings.warn('The ``attrs`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
_, attrs = self.load()
return attrs
@property
def dimensions(self):
warnings.warn('The ``dimensions`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
return self.get_dimensions()
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter(object):
def __init__(self, lock=HDF5_LOCK):
self.sources = []
self.targets = []
self.lock = lock
def add(self, source, target):
if isinstance(source, dask_array_type):
self.sources.append(source)
self.targets.append(target)
else:
target[...] = source
def sync(self, compute=True):
if self.sources:
import dask.array as da
delayed_store = da.store(self.sources, self.targets,
lock=self.lock, compute=compute,
flush=True)
self.sources = []
self.targets = []
return delayed_store
class AbstractWritableDataStore(AbstractDataStore):
def __init__(self, writer=None, lock=HDF5_LOCK):
if writer is None:
writer = ArrayWriter(lock=lock)
self.writer = writer
self.delayed_store = None
def encode(self, variables, attributes):
"""
Encode the variables and attributes in this store
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
Returns
-------
variables : dict-like
attributes : dict-like
"""
variables = OrderedDict([(k, self.encode_variable(v))
for k, v in variables.items()])
attributes = OrderedDict([(k, self.encode_attribute(v))
for k, v in attributes.items()])
return variables, attributes
def encode_variable(self, v):
"""encode one variable"""
return v
def encode_attribute(self, a):
"""encode one attribute"""
return a
def set_dimension(self, d, l): # pragma: no cover
raise NotImplementedError
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError
def sync(self, compute=True):
if self._isopen and self._autoclose:
# datastore will be reopened during write
self.close()
self.delayed_store = self.writer.sync(compute=compute)
def store_dataset(self, dataset):
"""
in stores, variables are all variables AND coordinates
in xarray.Dataset variables are variables NOT coordinates,
so here we pass the whole dataset in instead of doing
dataset.variables
"""
self.store(dataset, dataset.attrs)
def store(self, variables, attributes, check_encoding_set=frozenset(),
unlimited_dims=None):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
variables, attributes = self.encode(variables, attributes)
self.set_attributes(attributes)
self.set_dimensions(variables, unlimited_dims=unlimited_dims)
self.set_variables(variables, check_encoding_set,
unlimited_dims=unlimited_dims)
def set_attributes(self, attributes):
"""
This provides a centralized method to set the dataset attributes on the
data store.
Parameters
----------
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in iteritems(attributes):
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set,
unlimited_dims=None):
"""
This provides a centralized method to set the variables on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
for vn, v in iteritems(variables):
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
name, v, check, unlimited_dims=unlimited_dims)
self.writer.add(source, target)
def set_dimensions(self, variables, unlimited_dims=None):
"""
This provides a centralized method to set the dimensions on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if unlimited_dims is None:
unlimited_dims = set()
existing_dims = self.get_dimensions()
dims = OrderedDict()
for v in unlimited_dims: # put unlimited_dims first
dims[v] = None
for v in variables.values():
dims.update(dict(zip(v.dims, v.shape)))
for dim, length in dims.items():
if dim in existing_dims and length != existing_dims[dim]:
raise ValueError(
"Unable to update size for existing dimension"
"%r (%d != %d)" % (dim, length, existing_dims[dim]))
elif dim not in existing_dims:
is_unlimited = dim in unlimited_dims
self.set_dimension(dim, length, is_unlimited)
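# Hypothetical sketch (not xarray API): a minimal in-memory subclass showing how
# store() drives the hooks above -- encode(), set_attributes(), set_dimensions()
# and set_variables()/prepare_variable(). Everything named _Example* is an
# assumption for illustration only.
class _ExampleInMemoryStore(AbstractWritableDataStore):
    def __init__(self):
        super(_ExampleInMemoryStore, self).__init__()
        self._variables = OrderedDict()
        self._attributes = OrderedDict()
        self._dimensions = OrderedDict()
    def get_dimensions(self):
        return self._dimensions
    def set_dimension(self, dim, length, is_unlimited=False):
        self._dimensions[dim] = None if is_unlimited else length
    def set_attribute(self, key, value):
        self._attributes[key] = value
    def prepare_variable(self, name, variable, check_encoding=False,
                         unlimited_dims=None):
        import numpy as np  # local import keeps this sketch self-contained
        # Allocate the in-memory target; the (target, source) pair returned
        # here is handed to self.writer by set_variables() above.
        target = np.empty(variable.shape, dtype=variable.dtype)
        self._variables[name] = target
        return target, variable.data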
class WritableCFDataStore(AbstractWritableDataStore):
def encode(self, variables, attributes):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
variables, attributes = cf_encoder(variables, attributes)
variables = OrderedDict([(k, self.encode_variable(v))
for k, v in variables.items()])
attributes = OrderedDict([(k, self.encode_attribute(v))
for k, v in attributes.items()])
return variables, attributes
class DataStorePickleMixin(object):
"""Subclasses must define `ds`, `_opener` and `_mode` attributes.
Do not subclass this class: it is not part of xarray's external API.
"""
def __getstate__(self):
state = self.__dict__.copy()
del state['_ds']
del state['_isopen']
if self._mode == 'w':
# file has already been created, don't override when restoring
state['_mode'] = 'a'
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._ds = None
self._isopen = False
@property
def ds(self):
if self._ds is not None and self._isopen:
return self._ds
ds = self._opener(mode=self._mode)
self._isopen = True
return ds
@contextlib.contextmanager
def ensure_open(self, autoclose=None):
"""
Helper function to make sure datasets are closed and opened
at appropriate times to avoid too many open file errors.
        Its use requires the `autoclose=True` argument to `open_mfdataset`.
"""
if autoclose is None:
autoclose = self._autoclose
if not self._isopen:
try:
self._ds = self._opener()
self._isopen = True
yield
finally:
if autoclose:
self.close()
else:
yield
def assert_open(self):
if not self._isopen:
raise AssertionError('internal failure: file must be open '
'if `autoclose=True` is used.')
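# Hypothetical sketch (not xarray API): the attributes a DataStorePickleMixin
# subclass is expected to provide (`_opener`, `_mode`, `_autoclose`, ...) and
# how ensure_open() is typically used. `_ExamplePickleableStore` is an
# assumption for illustration only.
class _ExamplePickleableStore(DataStorePickleMixin):
    def __init__(self, opener, mode='r', autoclose=False):
        self._opener = opener
        self._mode = mode
        self._autoclose = autoclose
        self._ds = None
        self._isopen = False
    def close(self):
        if self._ds is not None:
            self._ds.close()  # assumes the opened handle exposes close()
        self._ds = None
        self._isopen = False
    def list_variables(self):
        # Reopen the file if needed; with autoclose=True it is closed on exit.
        with self.ensure_open(autoclose=True):
            return list(self.ds.variables)  # assumes .variables on the handle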
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/common.py",
"copies": "1",
"size": "15762",
"license": "apache-2.0",
"hash": -8979061242060997000,
"line_mean": 29.9666011788,
"line_max": 79,
"alpha_frac": 0.5822865119,
"autogenerated": false,
"ratio": 4.571345707656612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000035720664404357916,
"num_lines": 509
} |
from __future__ import absolute_import, division, print_function
import contextlib
"""
A module to find python files, also embedding namespace package logic (PEP 420)
"""
# We need to be extra careful with python versions
# Ref : https://docs.python.org/dev/library/importlib.html#importlib.import_module
import os
import sys
from ._fileloader2 import (
_ImportError,
)
from ._spec_utils import (
ModuleSpec,
spec_from_file_location,
spec_from_loader
)
from ._utils import _verbose_message
from ._fileloader2 import _NamespacePath, NamespaceLoader2
import imp
import warnings
class PathFinder2(object):
"""
MetaFinder
"""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path): # from importlib.PathFinder
"""Search sys.path_hooks for a finder for 'path'."""
if sys.path_hooks is not None and not sys.path_hooks:
warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path): # from importlib.PathFinder
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
try:
path = os.getcwd()
except FileNotFoundError:
# Don't cache the failure as the cwd can easily change to
# a valid directory later on.
return None
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return spec_from_loader(fullname, loader)
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is for python2 only
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
elif spec.loader is None and spec.submodule_search_locations:
# Here we need to create a namespace loader to handle namespaces since python2 doesn't...
return NamespaceLoader2(spec.name, spec.submodule_search_locations)
else:
return spec.loader
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a
# spec which can create the namespace package.
spec.origin = 'namespace'
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
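# Illustrative sketch (not part of filefinder2's public API): registering the
# meta path finder above so namespace packages resolve on python2. The module
# name queried below is purely hypothetical.
def _example_install_pathfinder2():
    if PathFinder2 not in sys.meta_path:
        sys.meta_path.append(PathFinder2)
    # Returns a ModuleSpec (possibly a namespace spec) or None if not found.
    return PathFinder2.find_spec('hypothetical_namespace_pkg')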
class FileFinder2(object):
"""
FileFinder to find modules and load them via Loaders for python 2.7
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
# Note : we are not playing with cache here (too complex to get right and not worth it for obsolete python)
        # Need to disable this to match the importlib API
# # We need to check that we will be able to find a module or package,
# # or raise ImportError to allow other finders to be instantiated for this path.
# # => the logic must correspond to find_module()
# findable = False
# for root, dirs, files in os.walk(self.path):
# findable = findable or any(
# os.path.isfile(os.path.join(os.path.join(path, d), '__init__' + suffix))
# for suffix, _ in self._loaders
# for d in dirs
# ) or any(
# f.endswith(suffix)
# for suffix, _ in self._loaders
# for f in files
# )
#
# # CAREFUL : this is different from the FileFinder design in importlib,
# # since we need to be able to give up (raise ImportError) here and let other finders do their jobs
# if not findable:
# raise _ImportError("cannot find any matching module based on extensions {0}".format(
# [s for s, _ in self._loaders]),
# path=self.path
# )
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a spec for the specified module. Returns the
matching spec, or None if not found."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
base_path = os.path.join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
init_full_path = os.path.join(base_path, init_filename)
full_path = base_path + suffix
if os.path.isfile(init_full_path):
return self._get_spec(loader_class, fullname, init_full_path, [base_path], target)
if os.path.isfile(full_path): # maybe we need more checks here (importlib filefinder checks its cache...)
return self._get_spec(loader_class, fullname, full_path, None, target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = os.path.isdir(base_path)
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
# def find_spec(self, fullname, target=None):
# """ python3 latest API, to provide a py2/py3 extensible API """
# path = self.path
# tail_module = fullname.rpartition('.')[2]
#
# base_path = os.path.join(path, tail_module)
# for suffix, loader_class in self._loaders:
# full_path = None # adjusting path for package or file
# if os.path.isdir(base_path) and os.path.isfile(os.path.join(base_path, '__init__' + suffix)):
# # __init__.py path will be computed by the loader when needed
# loader = loader_class(fullname, base_path)
# elif os.path.isfile(base_path + suffix):
# loader = loader_class(fullname, base_path + suffix)
# loader = None
#
# return spec_from_loader(fullname, loader)
#
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def find_module(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns loader.
"""
spec = self.find_spec(fullname)
if spec is None:
return None
# We need to handle the namespace case here for python2
if spec.loader is None and len(spec.submodule_search_locations):
spec.loader = NamespaceLoader2(spec.name, spec.submodule_search_locations)
return spec.loader
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder2(path):
"""Path hook for FileFinder2."""
if not os.path.isdir(path):
raise _ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder2
def __repr__(self):
return 'FileFinder2({!r})'.format(self.path)
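# Illustrative sketch (assumption, not filefinder2 API): installing a
# FileFinder2 path hook for a given loader class and its source suffixes.
def _example_register_filefinder2(loader_cls, suffixes=('.py',)):
    hook = FileFinder2.path_hook((loader_cls, suffixes))
    if hook not in sys.path_hooks:
        sys.path_hooks.append(hook)
        # Path entry finders are memoized per directory, so drop stale entries.
        sys.path_importer_cache.clear()
    return hook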
| {
"repo_name": "asmodehn/filefinder2",
"path": "filefinder2/_filefinder2.py",
"copies": "1",
"size": "11583",
"license": "mit",
"hash": -437257864991677300,
"line_mean": 37.4817275748,
"line_max": 118,
"alpha_frac": 0.5894845895,
"autogenerated": false,
"ratio": 4.372593431483579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5462078020983578,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
import hashlib
import inspect
import linecache
from ._compat import exec_, iteritems
from . import _config
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return self.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attr(default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: Value that is used if an ``attrs``-generated
``__init__`` is used and no value is passed while instantiating. If
        the value is an instance of :class:`Factory`, its callable will be
        used to construct a new value (useful for mutable datatypes like lists
        or dicts).
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
They can be globally disabled and re-enabled using
:func:`get_run_validators`.
:type validator: callable
:param repr: Include this attribute in the generated ``__repr__`` method.
:type repr: bool
:param cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:type cmp: bool
:param hash: Include this attribute in the generated ``__hash__`` method.
:type hash: bool
:param init: Include this attribute in the generated ``__init__`` method.
:type init: bool
"""
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
)
def _transform_attrs(cl, these):
"""
Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
list as a tuple in `__attrs_attrs__`.
If *these* is passed, use that and don't look for them on the class.
"""
super_cls = []
for c in reversed(cl.__mro__[1:-1]):
sub_attrs = getattr(c, "__attrs_attrs__", None)
if sub_attrs is not None:
super_cls.extend(sub_attrs)
if these is None:
ca_list = [(name, attr)
for name, attr
in cl.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = [(name, ca)
for name, ca
in iteritems(these)]
cl.__attrs_attrs__ = tuple(super_cls + [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
])
had_default = False
for a in cl.__attrs_attrs__:
if these is None and a not in super_cls:
setattr(cl, a.name, a)
if had_default is True and a.default is NOTHING:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and a.default is not NOTHING:
had_default = True
def attributes(maybe_cl=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=True, init=True):
"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to (e.g. if you want to use
:class:`properties <property>`).
If *these* is not `None`, the class body is *ignored*.
:type these: class:`dict` of :class:`str` to :func:`attr.ib`
:param repr_ns: When using nested classes, there's no way in Python 2 to
automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:type repr: bool
:param cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
        a tuple of its ``attrs`` attributes. But the attributes are *only*
        compared if the types of both classes are *identical*!
:type cmp: bool
:param hash: Create a ``__hash__`` method that returns the :func:`hash` of
a tuple of all ``attrs`` attribute values.
:type hash: bool
    :param init: Create a ``__init__`` method that initializes the ``attrs``
attributes. Leading underscores are stripped for the argument name.
:type init: bool
"""
def wrap(cl):
if getattr(cl, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
_transform_attrs(cl, these)
if repr is True:
cl = _add_repr(cl, ns=repr_ns)
if cmp is True:
cl = _add_cmp(cl)
if hash is True:
cl = _add_hash(cl)
if init is True:
cl = _add_init(cl)
return cl
    # maybe_cl's type depends on the usage of the decorator. It's a class
    # if it's used as `@attributes` but ``None`` if used as `@attributes()`.
if maybe_cl is None:
return wrap
else:
return wrap(maybe_cl)
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _add_hash(cl, attrs=None):
"""
Add a hash method to *cl*.
"""
if attrs is None:
attrs = [a for a in cl.__attrs_attrs__ if a.hash]
def hash_(self):
"""
Automatically created by attrs.
"""
return hash(_attrs_to_tuple(self, attrs))
cl.__hash__ = hash_
return cl
def _add_cmp(cl, attrs=None):
"""
Add comparison methods to *cl*.
"""
if attrs is None:
attrs = [a for a in cl.__attrs_attrs__ if a.cmp]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def eq(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
"""
Automatically created by attrs.
"""
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
cl.__eq__ = eq
cl.__ne__ = ne
cl.__lt__ = lt
cl.__le__ = le
cl.__gt__ = gt
cl.__ge__ = ge
return cl
def _add_repr(cl, ns=None, attrs=None):
"""
Add a repr method to *cl*.
"""
if attrs is None:
attrs = [a for a in cl.__attrs_attrs__ if a.repr]
if ns is None:
qualname = getattr(cl, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1] # pragma: nocover
else:
class_name = cl.__name__
else:
class_name = ns + "." + cl.__name__
def repr_(self):
"""
Automatically created by attrs.
"""
return "{0}({1})".format(
class_name,
", ".join(a.name + "=" + repr(getattr(self, a.name))
for a in attrs)
)
cl.__repr__ = repr_
return cl
def _add_init(cl):
attrs = [a for a in cl.__attrs_attrs__ if a.init]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script = _attrs_to_script(attrs)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
exec_(bytecode, {"NOTHING": NOTHING,
"attr_dict": attr_dict,
"validate": validate}, locs)
init = locs["__init__"]
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename
)
cl.__init__ = init
return cl
def fields(cl):
"""
Returns the tuple of ``attrs`` attributes for a class.
:param cl: Class to introspect.
:type cl: class
:raise TypeError: If *cl* is not a class.
:raise ValueError: If *cl* is not an ``attrs`` class.
:rtype: tuple of :class:`attr.Attribute`
"""
if not inspect.isclass(cl):
raise TypeError("Passed object must be a class.")
attrs = getattr(cl, "__attrs_attrs__", None)
if attrs is None:
raise ValueError("{cl!r} is not an attrs-decorated class.".format(
cl=cl
))
return copy.deepcopy(attrs)
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
Leaves all exceptions through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
if a.validator is not None:
a.validator(inst, a, getattr(inst, a.name))
def _attrs_to_script(attrs):
"""
Return a valid Python script of an initializer for *attrs*.
"""
lines = []
args = []
has_validator = False
for a in attrs:
if a.validator is not None:
has_validator = True
attr_name = a.name
arg_name = a.name.lstrip("_")
if a.default is not NOTHING and not isinstance(a.default, Factory):
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
lines.append("self.{attr_name} = {arg_name}".format(
arg_name=arg_name,
attr_name=attr_name,
))
elif a.default is not NOTHING and isinstance(a.default, Factory):
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.extend("""\
if {arg_name} is not NOTHING:
self.{attr_name} = {arg_name}
else:
self.{attr_name} = attr_dict["{attr_name}"].default.factory()"""
.format(attr_name=attr_name,
arg_name=arg_name)
.split("\n"))
else:
args.append(arg_name)
lines.append("self.{attr_name} = {arg_name}".format(
attr_name=attr_name,
arg_name=arg_name,
))
if has_validator:
lines.append("validate(self)")
return """\
def __init__(self, {args}):
{setters}
""".format(
args=", ".join(args),
setters="\n ".join(lines) if lines else "pass",
)
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
"""
_attributes = [
"name", "default", "validator", "repr", "cmp", "hash", "init",
] # we can't use ``attrs`` so we have to cheat a little.
def __init__(self, **kw):
if len(kw) > len(Attribute._attributes):
raise TypeError("Too many arguments.")
try:
for a in Attribute._attributes:
setattr(self, a, kw[a])
except KeyError:
raise TypeError("Missing argument '{arg}'.".format(arg=a))
@classmethod
def from_counting_attr(cl, name, ca):
return cl(name=name,
**dict((k, getattr(ca, k))
for k
in Attribute._attributes
if k != "name"))
_a = [Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name in Attribute._attributes]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a), attrs=_a
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
"""
__attrs_attrs__ = [
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "default", "repr", "cmp", "hash", "init",)
]
counter = 0
def __init__(self, default, validator, repr, cmp, hash, init):
_CountingAttr.counter += 1
self.counter = _CountingAttr.counter
self.default = default
self.validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attributes
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
"""
factory = attr()
def make_class(name, attrs, **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
"""
if isinstance(attrs, dict):
cl_dict = attrs
elif isinstance(attrs, (list, tuple)):
cl_dict = dict((a, attr()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
return attributes(**attributes_arguments)(type(name, (object,), cl_dict))
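# Illustrative usage sketch (assumption, not part of attrs): how attr(),
# attributes() and make_class() from above compose. Deferred into a function
# purely for demonstration; nothing in this module calls it, and the class
# names are hypothetical.
def _example_attr_usage():
    @attributes
    class _ExamplePoint(object):
        x = attr(default=0)
        y = attr(default=0)
    # An equivalent class built programmatically:
    _ExamplePoint2 = make_class("_ExamplePoint2",
                                {"x": attr(default=0), "y": attr(default=0)})
    # Both carry generated __init__/__repr__/__eq__, e.g.
    # _ExamplePoint(1, 2) == _ExamplePoint(1, 2) is True.
    return _ExamplePoint(1, 2), _ExamplePoint2(3, 4)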
| {
"repo_name": "cyli/attrs",
"path": "attr/_make.py",
"copies": "1",
"size": "16008",
"license": "mit",
"hash": 1438861658888189200,
"line_mean": 28.2116788321,
"line_max": 79,
"alpha_frac": 0.5639055472,
"autogenerated": false,
"ratio": 3.943828529194383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5007734076394383,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
import inspect
import linecache
import sys
import threading
import warnings
from operator import itemgetter
from . import _config, setters
from ._compat import (
HAS_F_STRINGS,
PY2,
PY310,
PYPY,
isclass,
iteritems,
metadata_proxy,
new_class,
ordered_dict,
set_closure_cell,
)
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
PythonTooOldError,
UnannotatedAttributeError,
)
if not PY2:
import typing
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_%s"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = (
" {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
)
_classvar_prefixes = (
"typing.ClassVar",
"t.ClassVar",
"ClassVar",
"typing_extensions.ClassVar",
)
# we don't use a double-underscore prefix because that triggers
# name mangling when trying to create a slot for the field
# (when slots=True)
_hash_cache_field = "_attrs_cached_hash"
_empty_metadata_singleton = metadata_proxy({})
# Unique object for unequivocal getattr() defaults.
_sentinel = object()
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
``_Nothing`` is a singleton. There is only ever one of it.
.. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
"""
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super(_Nothing, cls).__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
def __bool__(self):
return False
def __len__(self):
return 0 # __bool__ for Python 2
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
class _CacheHashWrapper(int):
"""
An integer subclass that pickles / copies as None
This is used for non-slots classes with ``cache_hash=True``, to avoid
serializing a potentially (even likely) invalid hash value. Since ``None``
is the default value for uncalculated hashes, whenever this is copied,
the copy's value for the hash should automatically reset.
See GH #613 for more details.
"""
if PY2:
# For some reason `type(None)` isn't callable in Python 2, but we don't
# actually need a constructor for None objects, we just need any
# available function that returns None.
def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)):
return _none_constructor, _args
else:
def __reduce__(self, _none_constructor=type(None), _args=()):
return _none_constructor, _args
def attrib(
default=NOTHING,
validator=None,
repr=True,
cmp=None,
hash=None,
init=True,
metadata=None,
type=None,
converter=None,
factory=None,
kw_only=False,
eq=None,
order=None,
on_setattr=None,
):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of `Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to `attr.NOTHING`), a value
*must* be supplied when instantiating; otherwise a `TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value
:param callable factory: Syntactic sugar for
``default=attr.Factory(factory)``.
:param validator: `callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the `Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a `list` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: `callable` or a `list` of `callable`\\ s.
:param repr: Include this attribute in the generated ``__repr__``
method. If ``True``, include the attribute; if ``False``, omit it. By
default, the built-in ``repr()`` function is used. To override how the
attribute value is formatted, pass a ``callable`` that takes a single
value and returns a string. Note that the resulting string is used
as-is, i.e. it will be used directly *instead* of calling ``repr()``
(the default).
:type repr: a `bool` or a `callable` to use a custom function.
:param eq: If ``True`` (default), include this attribute in the
generated ``__eq__`` and ``__ne__`` methods that check two instances
for equality. To override how the attribute value is compared,
pass a ``callable`` that takes a single value and returns the value
to be compared.
:type eq: a `bool` or a `callable`.
    :param order: If ``True`` (default), include this attribute in the
generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
To override how the attribute value is ordered,
pass a ``callable`` that takes a single value and returns the value
to be ordered.
:type order: a `bool` or a `callable`.
:param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
same value. Must not be mixed with *eq* or *order*.
:type cmp: a `bool` or a `callable`.
:param Optional[bool] hash: Include this attribute in the generated
``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
        is the correct behavior according to the Python spec. Setting this
        value to anything other than ``None`` is *discouraged*.
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
        value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable converter: `callable` that is called by
``attrs``-generated ``__init__`` methods to convert attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See `extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
Please note that ``attrs`` doesn't do anything with this metadata by
itself. You can use it as part of your own code or for
`static type checking <types>`.
:param kw_only: Make this attribute keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
    :param on_setattr: Allows overwriting the *on_setattr* setting from
`attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
Set to `attr.setters.NO_OP` to run **no** `setattr` hooks for this
attribute -- regardless of the setting in `attr.s`.
:type on_setattr: `callable`, or a list of callables, or `None`, or
`attr.setters.NO_OP`
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *eq* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
.. versionadded:: 18.1.0
``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
.. versionadded:: 18.2.0 *kw_only*
.. versionchanged:: 19.2.0 *convert* keyword argument removed.
.. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
.. versionadded:: 20.1.0 *on_setattr*
.. versionchanged:: 20.3.0 *kw_only* backported to Python 2
.. versionchanged:: 21.1.0
*eq*, *order*, and *cmp* also accept a custom callable
.. versionchanged:: 21.1.0 *cmp* undeprecated
"""
eq, eq_key, order, order_key = _determine_attrib_eq_order(
cmp, eq, order, True
)
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if factory is not None:
if default is not NOTHING:
raise ValueError(
"The `default` and `factory` arguments are mutually "
"exclusive."
)
if not callable(factory):
raise ValueError("The `factory` argument must be a callable.")
default = Factory(factory)
if metadata is None:
metadata = {}
# Apply syntactic sugar by auto-wrapping.
if isinstance(on_setattr, (list, tuple)):
on_setattr = setters.pipe(*on_setattr)
if validator and isinstance(validator, (list, tuple)):
validator = and_(*validator)
if converter and isinstance(converter, (list, tuple)):
converter = pipe(*converter)
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=None,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
kw_only=kw_only,
eq=eq,
eq_key=eq_key,
order=order,
order_key=order_key,
on_setattr=on_setattr,
)
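# Illustrative sketch (assumption, not attrs API): typical attrib() usage.
# Deferred into a function so the machinery defined later in this module
# (attrs, _ClassBuilder, ...) exists by the time it runs; nothing calls this,
# and `_Point` is a hypothetical demonstration class.
def _example_attrib_usage():
    @attrs
    class _Point(object):
        x = attrib(default=0)
        y = attrib(default=0, converter=int)
    # The converter runs on the passed-in value, so y is coerced to an int.
    return _Point(x=1, y="2")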
def _compile_and_eval(script, globs, locs=None, filename=""):
"""
"Exec" the script with the given global (globs) and local (locs) variables.
"""
bytecode = compile(script, filename, "exec")
eval(bytecode, globs, locs)
def _make_method(name, script, filename, globs=None):
"""
Create the method with the script given and return the method object.
"""
locs = {}
if globs is None:
globs = {}
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
count = 1
base_filename = filename
while True:
linecache_tuple = (
len(script),
None,
script.splitlines(True),
filename,
)
old_val = linecache.cache.setdefault(filename, linecache_tuple)
if old_val == linecache_tuple:
break
else:
filename = "{}-{}>".format(base_filename[:-1], count)
count += 1
_compile_and_eval(script, globs, locs, filename)
return locs[name]
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(
_tuple_property_pat.format(index=i, attr_name=attr_name)
)
else:
attr_class_template.append(" pass")
globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
_compile_and_eval("\n".join(attr_class_template), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `base_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class(
"_Attributes",
[
# all attributes to build dunder methods for
"attrs",
# attributes that have been inherited
"base_attrs",
# map inherited attributes to their originating classes
"base_attrs_map",
],
)
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The string comparison hack is used to avoid evaluating all string
annotations which would put attrs-based classes at a performance
disadvantage compared to plain old classes.
"""
annot = str(annot)
# Annotation can be quoted.
if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
annot = annot[1:-1]
return annot.startswith(_classvar_prefixes)
def _has_own_attribute(cls, attrib_name):
"""
Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
Requires Python 3.
"""
attr = getattr(cls, attrib_name, _sentinel)
if attr is _sentinel:
return False
for base_cls in cls.__mro__[1:]:
a = getattr(base_cls, attrib_name, None)
if attr is a:
return False
return True
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
if _has_own_attribute(cls, "__annotations__"):
return cls.__annotations__
return {}
def _counter_getter(e):
"""
Key function for sorting to avoid re-creating a lambda for every class.
"""
return e[1].counter
def _collect_base_attrs(cls, taken_attr_names):
"""
Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
"""
base_attrs = []
base_attr_map = {} # A dictionary of base attrs to their classes.
# Traverse the MRO and collect attributes.
for base_cls in reversed(cls.__mro__[1:-1]):
for a in getattr(base_cls, "__attrs_attrs__", []):
if a.inherited or a.name in taken_attr_names:
continue
a = a.evolve(inherited=True)
base_attrs.append(a)
base_attr_map[a.name] = base_cls
# For each name, only keep the freshest definition i.e. the furthest at the
# back. base_attr_map is fine because it gets overwritten with every new
# instance.
filtered = []
seen = set()
for a in reversed(base_attrs):
if a.name in seen:
continue
filtered.insert(0, a)
seen.add(a.name)
return filtered, base_attr_map
def _collect_base_attrs_broken(cls, taken_attr_names):
"""
Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
N.B. *taken_attr_names* will be mutated.
Adhere to the old incorrect behavior.
Notably it collects from the front and considers inherited attributes which
leads to the buggy behavior reported in #428.
"""
base_attrs = []
base_attr_map = {} # A dictionary of base attrs to their classes.
# Traverse the MRO and collect attributes.
for base_cls in cls.__mro__[1:-1]:
for a in getattr(base_cls, "__attrs_attrs__", []):
if a.name in taken_attr_names:
continue
a = a.evolve(inherited=True)
taken_attr_names.add(a.name)
base_attrs.append(a)
base_attr_map[a.name] = base_cls
return base_attrs, base_attr_map
def _transform_attrs(
cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
    If *collect_by_mro* is True, collect them in the correct MRO order,
    otherwise use the old -- incorrect -- order. See #428.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = [(name, ca) for name, ca in iteritems(these)]
if not isinstance(these, ordered_dict):
ca_list.sort(key=_counter_getter)
elif auto_attribs is True:
ca_names = {
name
for name, attr in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: "
+ ", ".join(
sorted(unannotated, key=lambda n: cd.get(n).counter)
)
+ "."
)
else:
ca_list = sorted(
(
(name, attr)
for name, attr in cd.items()
if isinstance(attr, _CountingAttr)
),
key=lambda e: e[1].counter,
)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name, ca=ca, type=anns.get(attr_name)
)
for attr_name, ca in ca_list
]
if collect_by_mro:
base_attrs, base_attr_map = _collect_base_attrs(
cls, {a.name for a in own_attrs}
)
else:
base_attrs, base_attr_map = _collect_base_attrs_broken(
cls, {a.name for a in own_attrs}
)
attr_names = [a.name for a in base_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
if kw_only:
own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
attrs = AttrsClass(base_attrs + own_attrs)
# Mandatory vs non-mandatory attr order only matters when they are part of
# the __init__ signature and when they aren't kw_only (which are moved to
# the end and can be mandatory or non-mandatory in any order, as they will
# be specified as keyword args anyway). Check the order of those attrs:
had_default = False
for a in (a for a in attrs if a.init is not False and a.kw_only is False):
if had_default is True and a.default is NOTHING:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: %r" % (a,)
)
if had_default is False and a.default is not NOTHING:
had_default = True
if field_transformer is not None:
attrs = field_transformer(cls, attrs)
return _Attributes((attrs, base_attrs, base_attr_map))
if PYPY:
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
if isinstance(self, BaseException) and name in (
"__cause__",
"__context__",
):
BaseException.__setattr__(self, name, value)
return
raise FrozenInstanceError()
else:
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_attr_names",
"_attrs",
"_base_attr_map",
"_base_names",
"_cache_hash",
"_cls",
"_cls_dict",
"_delete_attribs",
"_frozen",
"_has_pre_init",
"_has_post_init",
"_is_exc",
"_on_setattr",
"_slots",
"_weakref_slot",
"_wrote_own_setattr",
"_has_custom_setattr",
)
def __init__(
self,
cls,
these,
slots,
frozen,
weakref_slot,
getstate_setstate,
auto_attribs,
kw_only,
cache_hash,
is_exc,
collect_by_mro,
on_setattr,
has_custom_setattr,
field_transformer,
):
attrs, base_attrs, base_map = _transform_attrs(
cls,
these,
auto_attribs,
kw_only,
collect_by_mro,
field_transformer,
)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._base_names = set(a.name for a in base_attrs)
self._base_attr_map = base_map
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen
self._weakref_slot = weakref_slot
self._cache_hash = cache_hash
self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._delete_attribs = not bool(these)
self._is_exc = is_exc
self._on_setattr = on_setattr
self._has_custom_setattr = has_custom_setattr
self._wrote_own_setattr = False
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
self._wrote_own_setattr = True
elif on_setattr == setters.validate:
for a in attrs:
if a.validator is not None:
break
else:
# If class-level on_setattr is set to validating, but there's
# no field to validate, pretend like there's no on_setattr.
self._on_setattr = None
if getstate_setstate:
(
self._cls_dict["__getstate__"],
self._cls_dict["__setstate__"],
) = self._make_getstate_setstate()
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
base_names = self._base_names
# Clean class of attribute definitions (`attr.ib()`s).
if self._delete_attribs:
for name in self._attr_names:
if (
name not in base_names
and getattr(cls, name, _sentinel) is not _sentinel
):
try:
delattr(cls, name)
except AttributeError:
# This can happen if a base class defines a class
# variable and we want to set an attribute with the
# same name by using only a type annotation.
pass
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
# If we've inherited an attrs __setattr__ and don't write our own,
# reset it to object's.
if not self._wrote_own_setattr and getattr(
cls, "__attrs_own_setattr__", False
):
cls.__attrs_own_setattr__ = False
if not self._has_custom_setattr:
cls.__setattr__ = object.__setattr__
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
}
# If our class doesn't have its own implementation of __setattr__
# (either from the user or by us), check the bases, if one of them has
# an attrs-made __setattr__, that needs to be reset. We don't walk the
# MRO because we only care about our immediate base classes.
# XXX: This can be confused by subclassing a slotted attrs class with
# XXX: a non-attrs class and subclass the resulting class with an attrs
# XXX: class. See `test_slotted_confused` for details. For now that's
# XXX: OK with us.
if not self._wrote_own_setattr:
cd["__attrs_own_setattr__"] = False
if not self._has_custom_setattr:
for base_cls in self._cls.__bases__:
if base_cls.__dict__.get("__attrs_own_setattr__", False):
cd["__setattr__"] = object.__setattr__
break
# Traverse the MRO to collect existing slots
# and check for an existing __weakref__.
existing_slots = dict()
weakref_inherited = False
for base_cls in self._cls.__mro__[1:-1]:
if base_cls.__dict__.get("__weakref__", None) is not None:
weakref_inherited = True
existing_slots.update(
{
name: getattr(base_cls, name)
for name in getattr(base_cls, "__slots__", [])
}
)
base_names = set(self._base_names)
names = self._attr_names
if (
self._weakref_slot
and "__weakref__" not in getattr(self._cls, "__slots__", ())
and "__weakref__" not in names
and not weakref_inherited
):
names += ("__weakref__",)
# We only add the names of attributes that aren't inherited.
# Setting __slots__ to inherited attributes wastes memory.
slot_names = [name for name in names if name not in base_names]
        # There are slots for attributes from the current class
        # that are defined in parent classes.
        # As their descriptors may be overridden by a child class,
        # we collect them here and update the class dict.
reused_slots = {
slot: slot_descriptor
for slot, slot_descriptor in iteritems(existing_slots)
if slot in slot_names
}
slot_names = [name for name in slot_names if name not in reused_slots]
cd.update(reused_slots)
if self._cache_hash:
slot_names.append(_hash_cache_field)
cd["__slots__"] = tuple(slot_names)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
# Create new class based on old class and our methods.
cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
elif isinstance(item, property):
# Workaround for property `super()` shortcut (PY3-only).
# There is no universal way for other descriptors.
closure_cells = getattr(item.fget, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
try:
match = cell.cell_contents is self._cls
except ValueError: # ValueError: Cell is empty
pass
else:
if match:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns, self._cls)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def _make_getstate_setstate(self):
"""
Create custom __setstate__ and __getstate__ methods.
"""
# __weakref__ is not writable.
state_attr_names = tuple(
an for an in self._attr_names if an != "__weakref__"
)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in state_attr_names)
hash_caching_enabled = self._cache_hash
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(state_attr_names, state):
__bound_setattr(name, value)
# The hash code cache is not included when the object is
# serialized, but it still needs to be initialized to None to
# indicate that the first call to __hash__ should be a cache
# miss.
if hash_caching_enabled:
__bound_setattr(_hash_cache_field, None)
return slots_getstate, slots_setstate
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(
self._cls,
self._attrs,
frozen=self._frozen,
cache_hash=self._cache_hash,
)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._cls,
self._attrs,
self._has_pre_init,
self._has_post_init,
self._frozen,
self._slots,
self._cache_hash,
self._base_attr_map,
self._is_exc,
self._on_setattr,
attrs_init=False,
)
)
return self
def add_match_args(self):
self._cls_dict["__match_args__"] = tuple(
field.name
for field in self._attrs
if field.init and not field.kw_only
)
def add_attrs_init(self):
self._cls_dict["__attrs_init__"] = self._add_method_dunders(
_make_init(
self._cls,
self._attrs,
self._has_pre_init,
self._has_post_init,
self._frozen,
self._slots,
self._cache_hash,
self._base_attr_map,
self._is_exc,
self._on_setattr,
attrs_init=True,
)
)
return self
def add_eq(self):
cd = self._cls_dict
cd["__eq__"] = self._add_method_dunders(
_make_eq(self._cls, self._attrs)
)
cd["__ne__"] = self._add_method_dunders(_make_ne())
return self
def add_order(self):
cd = self._cls_dict
cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_order(self._cls, self._attrs)
)
return self
def add_setattr(self):
if self._frozen:
return self
sa_attrs = {}
for a in self._attrs:
on_setattr = a.on_setattr or self._on_setattr
if on_setattr and on_setattr is not setters.NO_OP:
sa_attrs[a.name] = a, on_setattr
if not sa_attrs:
return self
if self._has_custom_setattr:
# We need to write a __setattr__ but there already is one!
raise ValueError(
"Can't combine custom __setattr__ with on_setattr hooks."
)
# docstring comes from _add_method_dunders
def __setattr__(self, name, val):
try:
a, hook = sa_attrs[name]
except KeyError:
nval = val
else:
nval = hook(self, a, val)
_obj_setattr(self, name, nval)
self._cls_dict["__attrs_own_setattr__"] = True
self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
self._wrote_own_setattr = True
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__)
)
except AttributeError:
pass
try:
method.__doc__ = "Method generated by attrs for class %s." % (
self._cls.__qualname__,
)
except AttributeError:
pass
return method
_CMP_DEPRECATION = (
"The usage of `cmp` is deprecated and will be removed on or after "
"2021-06-01. Please use `eq` and `order` instead."
)
def _determine_attrs_eq_order(cmp, eq, order, default_eq):
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
values of eq and order. If *eq* is None, set it to *default_eq*.
"""
if cmp is not None and any((eq is not None, order is not None)):
raise ValueError("Don't mix `cmp` with `eq' and `order`.")
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp, cmp
# If left None, equality is set to the specified default and ordering
# mirrors equality.
if eq is None:
eq = default_eq
if order is None:
order = eq
if eq is False and order is True:
raise ValueError("`order` can only be True if `eq` is True too.")
return eq, order
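# Worked example of the precedence rules implemented above (illustration only;
# nothing in attrs calls this helper).
def _example_eq_order_defaults():
    # With everything left at None, equality defaults and ordering mirrors it.
    assert _determine_attrs_eq_order(None, None, None, True) == (True, True)
    # cmp takes precedence and sets both values.
    assert _determine_attrs_eq_order(False, None, None, True) == (False, False)
    # eq/order can be set independently, as long as order isn't True with eq False.
    assert _determine_attrs_eq_order(None, True, False, True) == (True, False)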
def _determine_attrib_eq_order(cmp, eq, order, default_eq):
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
values of eq and order. If *eq* is None, set it to *default_eq*.
"""
if cmp is not None and any((eq is not None, order is not None)):
raise ValueError("Don't mix `cmp` with `eq' and `order`.")
def decide_callable_or_boolean(value):
"""
Decide whether a key function is used.
"""
if callable(value):
value, key = True, value
else:
key = None
return value, key
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
cmp, cmp_key = decide_callable_or_boolean(cmp)
return cmp, cmp_key, cmp, cmp_key
# If left None, equality is set to the specified default and ordering
# mirrors equality.
if eq is None:
eq, eq_key = default_eq, None
else:
eq, eq_key = decide_callable_or_boolean(eq)
if order is None:
order, order_key = eq, eq_key
else:
order, order_key = decide_callable_or_boolean(order)
if eq is False and order is True:
raise ValueError("`order` can only be True if `eq` is True too.")
return eq, eq_key, order, order_key
def _determine_whether_to_implement(
cls, flag, auto_detect, dunders, default=True
):
"""
Check whether we should implement a set of methods for *cls*.
*flag* is the argument passed into @attr.s like 'init', *auto_detect* the
same as passed into @attr.s and *dunders* is a tuple of attribute names
whose presence signal that the user has implemented it themselves.
Return *default* if no reason for either for or against is found.
auto_detect must be False on Python 2.
"""
if flag is True or flag is False:
return flag
if flag is None and auto_detect is False:
return default
# Logically, flag is None and auto_detect is True here.
for dunder in dunders:
if _has_own_attribute(cls, dunder):
return False
return default
def attrs(
maybe_cls=None,
these=None,
repr_ns=None,
repr=None,
cmp=None,
hash=None,
init=None,
slots=False,
frozen=False,
weakref_slot=True,
str=False,
auto_attribs=False,
kw_only=False,
cache_hash=False,
auto_exc=False,
eq=None,
order=None,
auto_detect=False,
collect_by_mro=False,
getstate_setstate=None,
on_setattr=None,
field_transformer=None,
match_args=True,
):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using `attr.ib` or the *these* argument.
:param these: A dictionary of name to `attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes and will *not* remove any attributes from it.
If *these* is an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the attributes inside *these*. Otherwise the order
of the definition of the attributes is used.
:type these: `dict` of `str` to `attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
*order*, and *hash* arguments explicitly, assume they are set to
``True`` **unless any** of the involved methods for one of the
arguments is implemented in the *current* class (i.e. it is *not*
inherited from some base class).
So for example by implementing ``__eq__`` on a class yourself,
``attrs`` will deduce ``eq=False`` and will create *neither*
``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
``__ne__`` by default, so it *should* be enough to only implement
``__eq__`` in most cases).
.. warning::
If you prevent ``attrs`` from creating the ordering methods for you
(``order=False``, e.g. by implementing ``__le__``), it becomes
*your* responsibility to make sure its ordering is sound. The best
way is to use the `functools.total_ordering` decorator.
Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
*cmp*, or *hash* overrides whatever *auto_detect* would determine.
*auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
a `PythonTooOldError`.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
`Exception`\ s.
:param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
and ``__ne__`` methods that check two instances for equality.
They compare the instances as if they were tuples of their ``attrs``
attributes if and only if the types of both classes are *identical*!
:param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that behave like *eq* above and
allow instances to be ordered. If ``None`` (default) mirror value of
*eq*.
:param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
and *order* to the same value. Must not be mixed with *eq* or *order*.
:param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
is generated according how *eq* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *eq* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the base class will be used (if base class is
``object``, this means it will fall back to id-based hashing.).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See our documentation on `hashing`, Python's documentation on
`object.__hash__`, and the `GitHub issue that led to the default \
behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
details.
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the argument
name. If a ``__attrs_pre_init__`` method exists on the class, it will
be called before the class is initialized. If a ``__attrs_post_init__``
method exists on the class, it will be called after the class is fully
initialized.
If ``init`` is ``False``, an ``__attrs_init__`` method will be
injected instead. This allows you to define a custom ``__init__``
method that can do pre-init work such as ``super().__init__()``,
and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
:param bool slots: Create a `slotted class <slotted classes>` that's more
memory-efficient. Slotted classes are generally superior to the default
dict classes, but have some gotchas you should know about, so we
encourage you to read the `glossary entry <slotted classes>`.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
`attr.exceptions.FrozenInstanceError` is raised.
.. note::
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance `impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
5. Subclasses of a frozen class are frozen too.
:param bool weakref_slot: Make instances weak-referenceable. This has no
effect unless ``slots`` is also enabled.
:param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated
attributes (Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an `attr.ib` but lacks a type
annotation, an `attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value like if it were passed using
``attr.ib(default=42)``. Passing an instance of `Factory` also
works as expected in most cases (see warning below).
Attributes annotated as `typing.ClassVar`, and attributes that are
neither annotated nor set to an `attr.ib` are **ignored**.
.. warning::
For features that use the attribute name to create decorators (e.g.
`validators <validators>`), you still *must* assign `attr.ib` to
them. Otherwise Python will either not find the name or try to use
the default value to call e.g. ``validator`` on it.
These errors can be quite confusing and probably the most common bug
report on our bug tracker.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
:param bool kw_only: Make all attributes keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
:param bool cache_hash: Ensure that the object's hash code is computed
only once and stored on the object. If this is set to ``True``,
hashing must be either explicitly or implicitly enabled for this
class. If the hash code is cached, avoid any reassignments of
fields involved in hash code computation or mutations of the objects
those fields point to after object creation. If such changes occur,
the behavior of the object's hash code is undefined.
:param bool auto_exc: If the class subclasses `BaseException`
(which implicitly includes any subclass of any exception), the
        following happens so that it behaves like a well-behaved Python
        exception class:
- the values for *eq*, *order*, and *hash* are ignored and the
instances compare and hash by the instance's ids (N.B. ``attrs`` will
*not* remove existing implementations of ``__hash__`` or the equality
methods. It just won't add own ones.),
- all attributes that are either passed into ``__init__`` or have a
default value are additionally available as a tuple in the ``args``
attribute,
- the value of *str* is ignored leaving ``__str__`` to base classes.
:param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
collects attributes from base classes. The default behavior is
incorrect in certain cases of multiple inheritance. It should be on by
default but is kept off for backward-compatibility.
See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
more details.
:param Optional[bool] getstate_setstate:
.. note::
This is usually only interesting for slotted classes and you should
probably just set *auto_detect* to `True`.
If `True`, ``__getstate__`` and
``__setstate__`` are generated and attached to the class. This is
necessary for slotted classes to be pickleable. If left `None`, it's
`True` by default for slotted classes and ``False`` for dict classes.
If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
and **either** ``__getstate__`` or ``__setstate__`` is detected directly
on the class (i.e. not inherited), it is set to `False` (this is usually
what you want).
:param on_setattr: A callable that is run whenever the user attempts to set
an attribute (either by assignment like ``i.x = 42`` or by using
`setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
as validators: the instance, the attribute that is being modified, and
the new value.
If no exception is raised, the attribute is set to the return value of
the callable.
If a list of callables is passed, they're automatically wrapped in an
`attr.setters.pipe`.
:param Optional[callable] field_transformer:
A function that is called with the original class object and all
fields right before ``attrs`` finalizes the class. You can use
this, e.g., to automatically add converters or validators to
fields based on their types. See `transform-fields` for more details.
:param bool match_args:
If `True` (default), set ``__match_args__`` on the class to support
`PEP 634 <https://www.python.org/dev/peps/pep-0634/>`_ (Structural
Pattern Matching). It is a tuple of all positional-only ``__init__``
parameter names on Python 3.10 and later. Ignored on older Python
versions.
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*
.. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
.. versionchanged:: 17.1.0
*hash* supports ``None`` as value which is also the default now.
.. versionadded:: 17.3.0 *auto_attribs*
.. versionchanged:: 18.1.0
If *these* is passed, no attributes are deleted from the class body.
.. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
.. versionadded:: 18.2.0 *weakref_slot*
.. deprecated:: 18.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
`DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
to each other.
.. versionchanged:: 19.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
subclasses comparable anymore.
.. versionadded:: 18.2.0 *kw_only*
.. versionadded:: 18.2.0 *cache_hash*
.. versionadded:: 19.1.0 *auto_exc*
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
.. versionadded:: 20.1.0 *auto_detect*
.. versionadded:: 20.1.0 *collect_by_mro*
.. versionadded:: 20.1.0 *getstate_setstate*
.. versionadded:: 20.1.0 *on_setattr*
.. versionadded:: 20.3.0 *field_transformer*
.. versionchanged:: 21.1.0
``init=False`` injects ``__attrs_init__``
.. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
.. versionchanged:: 21.1.0 *cmp* undeprecated
.. versionadded:: 21.3.0 *match_args*
"""
if auto_detect and PY2:
raise PythonTooOldError(
"auto_detect only works on Python 3 and later."
)
eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
hash_ = hash # work around the lack of nonlocal
if isinstance(on_setattr, (list, tuple)):
on_setattr = setters.pipe(*on_setattr)
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
is_frozen = frozen or _has_frozen_base_class(cls)
is_exc = auto_exc is True and issubclass(cls, BaseException)
has_own_setattr = auto_detect and _has_own_attribute(
cls, "__setattr__"
)
if has_own_setattr and is_frozen:
raise ValueError("Can't freeze a class with a custom __setattr__.")
builder = _ClassBuilder(
cls,
these,
slots,
is_frozen,
weakref_slot,
_determine_whether_to_implement(
cls,
getstate_setstate,
auto_detect,
("__getstate__", "__setstate__"),
default=slots,
),
auto_attribs,
kw_only,
cache_hash,
is_exc,
collect_by_mro,
on_setattr,
has_own_setattr,
field_transformer,
)
if _determine_whether_to_implement(
cls, repr, auto_detect, ("__repr__",)
):
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
eq = _determine_whether_to_implement(
cls, eq_, auto_detect, ("__eq__", "__ne__")
)
if not is_exc and eq is True:
builder.add_eq()
if not is_exc and _determine_whether_to_implement(
cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
):
builder.add_order()
builder.add_setattr()
if (
hash_ is None
and auto_detect is True
and _has_own_attribute(cls, "__hash__")
):
hash = False
else:
hash = hash_
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and eq is False) or is_exc:
# Don't do anything. Should fall back to __object__'s __hash__
# which is by id.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
elif hash is True or (
hash is None and eq is True and is_frozen is True
):
# Build a __hash__ if told so, or if it's safe.
builder.add_hash()
else:
# Raise TypeError on attempts to hash.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
builder.make_unhashable()
if _determine_whether_to_implement(
cls, init, auto_detect, ("__init__",)
):
builder.add_init()
else:
builder.add_attrs_init()
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" init must be True."
)
if (
PY310
and match_args
and not _has_own_attribute(cls, "__match_args__")
):
builder.add_match_args()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
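# A rough usage sketch (illustrative only; `attrib` is the field factory
# defined earlier in this module):
#
#   >>> @attrs(frozen=True, slots=True)
#   ... class Point(object):
#   ...     x = attrib()
#   ...     y = attrib()
#   >>> Point(1, 2)
#   Point(x=1, y=2)
#   >>> Point(1, 2) == Point(1, 2)
#   True
#   >>> Point(1, 2).x = 3
#   Traceback (most recent call last):
#       ...
#   attr.exceptions.FrozenInstanceError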
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_base_class(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(cls.__setattr__, "__module__", None)
== _frozen_setattrs.__module__
and cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_base_class(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _generate_unique_filename(cls, func_name):
"""
Create a "filename" suitable for a function being generated.
"""
unique_filename = "<attrs generated {0} {1}.{2}>".format(
func_name,
cls.__module__,
getattr(cls, "__qualname__", cls.__name__),
)
return unique_filename
def _make_hash(cls, attrs, frozen, cache_hash):
attrs = tuple(
a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
)
tab = " "
unique_filename = _generate_unique_filename(cls, "hash")
type_hash = hash(unique_filename)
hash_def = "def __hash__(self"
hash_func = "hash(("
closing_braces = "))"
if not cache_hash:
hash_def += "):"
else:
if not PY2:
hash_def += ", *"
hash_def += (
", _cache_wrapper="
+ "__import__('attr._make')._make._CacheHashWrapper):"
)
hash_func = "_cache_wrapper(" + hash_func
closing_braces += ")"
method_lines = [hash_def]
def append_hash_computation_lines(prefix, indent):
"""
Generate the code for actually computing the hash code.
Below this will either be returned directly or used to compute
a value which is then cached, depending on the value of cache_hash
"""
method_lines.extend(
[
indent + prefix + hash_func,
indent + " %d," % (type_hash,),
]
)
for a in attrs:
method_lines.append(indent + " self.%s," % a.name)
method_lines.append(indent + " " + closing_braces)
if cache_hash:
method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
if frozen:
append_hash_computation_lines(
"object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
)
method_lines.append(tab * 2 + ")") # close __setattr__
else:
append_hash_computation_lines(
"self.%s = " % _hash_cache_field, tab * 2
)
method_lines.append(tab + "return self.%s" % _hash_cache_field)
else:
append_hash_computation_lines("return ", tab)
script = "\n".join(method_lines)
return _make_method("__hash__", script, unique_filename)
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
return cls
def _make_ne():
"""
Create __ne__ method.
"""
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or
return the result negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
return __ne__
def _make_eq(cls, attrs):
"""
Create __eq__ method for *cls* with *attrs*.
"""
attrs = [a for a in attrs if a.eq]
unique_filename = _generate_unique_filename(cls, "eq")
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
globs = {}
if attrs:
lines.append(" return (")
others = [" ) == ("]
for a in attrs:
if a.eq_key:
cmp_name = "_%s_key" % (a.name,)
# Add the key function to the global namespace
# of the evaluated function.
globs[cmp_name] = a.eq_key
lines.append(
" %s(self.%s),"
% (
cmp_name,
a.name,
)
)
others.append(
" %s(other.%s),"
% (
cmp_name,
a.name,
)
)
else:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
return _make_method("__eq__", script, unique_filename, globs)
def _make_order(cls, attrs):
"""
Create ordering methods for *cls* with *attrs*.
"""
attrs = [a for a in attrs if a.order]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return tuple(
key(value) if key else value
for value, key in (
(getattr(obj, a.name), a.order_key) for a in attrs
)
)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) < attrs_to_tuple(other)
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) <= attrs_to_tuple(other)
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) > attrs_to_tuple(other)
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) >= attrs_to_tuple(other)
return NotImplemented
return __lt__, __le__, __gt__, __ge__
def _add_eq(cls, attrs=None):
"""
Add equality methods to *cls* with *attrs*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__ = _make_eq(cls, attrs)
cls.__ne__ = _make_ne()
return cls
_already_repring = threading.local()
if HAS_F_STRINGS:
def _make_repr(attrs, ns, cls):
unique_filename = "repr"
# Figure out which attributes to include, and which function to use to
# format them. The a.repr value can be either bool or a custom
# callable.
attr_names_with_reprs = tuple(
(a.name, (repr if a.repr is True else a.repr), a.init)
for a in attrs
if a.repr is not False
)
globs = {
name + "_repr": r
for name, r, _ in attr_names_with_reprs
if r != repr
}
globs["_already_repring"] = _already_repring
globs["AttributeError"] = AttributeError
globs["NOTHING"] = NOTHING
attribute_fragments = []
for name, r, i in attr_names_with_reprs:
accessor = (
"self." + name
if i
else 'getattr(self, "' + name + '", NOTHING)'
)
fragment = (
"%s={%s!r}" % (name, accessor)
if r == repr
else "%s={%s_repr(%s)}" % (name, name, accessor)
)
attribute_fragments.append(fragment)
repr_fragment = ", ".join(attribute_fragments)
if ns is None:
cls_name_fragment = (
'{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
)
else:
cls_name_fragment = ns + ".{self.__class__.__name__}"
lines = []
lines.append("def __repr__(self):")
lines.append(" try:")
lines.append(" working_set = _already_repring.working_set")
lines.append(" except AttributeError:")
lines.append(" working_set = {id(self),}")
lines.append(" _already_repring.working_set = working_set")
lines.append(" else:")
lines.append(" if id(self) in working_set:")
lines.append(" return '...'")
lines.append(" else:")
lines.append(" working_set.add(id(self))")
lines.append(" try:")
lines.append(
" return f'%s(%s)'" % (cls_name_fragment, repr_fragment)
)
lines.append(" finally:")
lines.append(" working_set.remove(id(self))")
return _make_method(
"__repr__", "\n".join(lines), unique_filename, globs=globs
)
else:
def _make_repr(attrs, ns, _):
"""
Make a repr method that includes relevant *attrs*, adding *ns* to the
full name.
"""
# Figure out which attributes to include, and which function to use to
# format them. The a.repr value can be either bool or a custom
# callable.
attr_names_with_reprs = tuple(
(a.name, repr if a.repr is True else a.repr)
for a in attrs
if a.repr is not False
)
def __repr__(self):
"""
Automatically created by attrs.
"""
try:
working_set = _already_repring.working_set
except AttributeError:
working_set = set()
_already_repring.working_set = working_set
if id(self) in working_set:
return "..."
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None: # pragma: no cover
# This case only happens on Python 3.5 and 3.6. We exclude
# it from coverage, because we don't want to slow down our
# test suite by running them under coverage too for this
# one line.
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
# Since 'self' remains on the stack (i.e.: strongly referenced)
# for the duration of this call, it's safe to depend on id(...)
# stability, and not need to track the instance and therefore
# worry about properties like weakref- or hash-ability.
working_set.add(id(self))
try:
result = [class_name, "("]
first = True
for name, attr_repr in attr_names_with_reprs:
if first:
first = False
else:
result.append(", ")
result.extend(
(name, "=", attr_repr(getattr(self, name, NOTHING)))
)
return "".join(result) + ")"
finally:
working_set.remove(id(self))
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns, cls)
return cls
def fields(cls):
"""
Return the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of `attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
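# A rough sketch of the name access mentioned in the docstring (illustrative
# only; `attrs`/`attrib` as defined in this module, reprs shortened):
#
#   >>> @attrs
#   ... class C(object):
#   ...     x = attrib()
#   ...     y = attrib()
#   >>> fields(C)
#   (Attribute(name='x', ...), Attribute(name='y', ...))
#   >>> fields(C).x.name
#   'x'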
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
`attr.Attribute`\\ s. This will be a `dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs))
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _is_slot_cls(cls):
return "__slots__" in cls.__dict__
def _is_slot_attr(a_name, base_attr_map):
"""
Check if the attribute name comes from a slot class.
"""
return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
def _make_init(
cls,
attrs,
pre_init,
post_init,
frozen,
slots,
cache_hash,
base_attr_map,
is_exc,
cls_on_setattr,
attrs_init,
):
has_cls_on_setattr = (
cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
)
if frozen and has_cls_on_setattr:
raise ValueError("Frozen classes can't use on_setattr.")
needs_cached_setattr = cache_hash or frozen
filtered_attrs = []
attr_dict = {}
for a in attrs:
if not a.init and a.default is NOTHING:
continue
filtered_attrs.append(a)
attr_dict[a.name] = a
if a.on_setattr is not None:
if frozen is True:
raise ValueError("Frozen classes can't use on_setattr.")
needs_cached_setattr = True
elif (
has_cls_on_setattr and a.on_setattr is not setters.NO_OP
) or _is_slot_attr(a.name, base_attr_map):
needs_cached_setattr = True
unique_filename = _generate_unique_filename(cls, "init")
script, globs, annotations = _attrs_to_init_script(
filtered_attrs,
frozen,
slots,
pre_init,
post_init,
cache_hash,
base_attr_map,
is_exc,
needs_cached_setattr,
has_cls_on_setattr,
attrs_init,
)
if cls.__module__ in sys.modules:
# This makes typing.get_type_hints(CLS.__init__) resolve string types.
globs.update(sys.modules[cls.__module__].__dict__)
globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
if needs_cached_setattr:
# Save the lookup overhead in __init__ if we need to circumvent
# setattr hooks.
globs["_cached_setattr"] = _obj_setattr
init = _make_method(
"__attrs_init__" if attrs_init else "__init__",
script,
unique_filename,
globs,
)
init.__annotations__ = annotations
return init
def _setattr(attr_name, value_var, has_on_setattr):
"""
Use the cached object.setattr to set *attr_name* to *value_var*.
"""
return "_setattr('%s', %s)" % (attr_name, value_var)
def _setattr_with_converter(attr_name, value_var, has_on_setattr):
"""
Use the cached object.setattr to set *attr_name* to *value_var*, but run
its converter first.
"""
return "_setattr('%s', %s(%s))" % (
attr_name,
_init_converter_pat % (attr_name,),
value_var,
)
def _assign(attr_name, value, has_on_setattr):
"""
Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
relegate to _setattr.
"""
if has_on_setattr:
return _setattr(attr_name, value, True)
return "self.%s = %s" % (attr_name, value)
def _assign_with_converter(attr_name, value_var, has_on_setattr):
"""
Unless *attr_name* has an on_setattr hook, use normal assignment after
conversion. Otherwise relegate to _setattr_with_converter.
"""
if has_on_setattr:
return _setattr_with_converter(attr_name, value_var, True)
return "self.%s = %s(%s)" % (
attr_name,
_init_converter_pat % (attr_name,),
value_var,
)
if PY2:
def _unpack_kw_only_py2(attr_name, default=None):
"""
Unpack *attr_name* from _kw_only dict.
"""
if default is not None:
arg_default = ", %s" % default
else:
arg_default = ""
return "%s = _kw_only.pop('%s'%s)" % (
attr_name,
attr_name,
arg_default,
)
def _unpack_kw_only_lines_py2(kw_only_args):
"""
Unpack all *kw_only_args* from _kw_only dict and handle errors.
Given a list of strings "{attr_name}" and "{attr_name}={default}"
generates list of lines of code that pop attrs from _kw_only dict and
raise TypeError similar to builtin if required attr is missing or
extra key is passed.
>>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"])))
try:
a = _kw_only.pop('a')
b = _kw_only.pop('b', 42)
except KeyError as _key_error:
raise TypeError(
...
if _kw_only:
raise TypeError(
...
"""
lines = ["try:"]
lines.extend(
" " + _unpack_kw_only_py2(*arg.split("="))
for arg in kw_only_args
)
lines += """\
except KeyError as _key_error:
raise TypeError(
'__init__() missing required keyword-only argument: %s' % _key_error
)
if _kw_only:
raise TypeError(
'__init__() got an unexpected keyword argument %r'
% next(iter(_kw_only))
)
""".split(
"\n"
)
return lines
def _attrs_to_init_script(
attrs,
frozen,
slots,
pre_init,
post_init,
cache_hash,
base_attr_map,
is_exc,
needs_cached_setattr,
has_cls_on_setattr,
attrs_init,
):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if pre_init:
lines.append("self.__attrs_pre_init__()")
if needs_cached_setattr:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
# Note _setattr will be used again below if cache_hash is True
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
if frozen is True:
if slots is True:
fmt_setter = _setattr
fmt_setter_with_converter = _setattr_with_converter
else:
# Dict frozen classes assign directly to __dict__.
# But only if the attribute doesn't come from an ancestor slot
# class.
# Note _inst_dict will be used again below if cache_hash is True
lines.append("_inst_dict = self.__dict__")
def fmt_setter(attr_name, value_var, has_on_setattr):
if _is_slot_attr(attr_name, base_attr_map):
return _setattr(attr_name, value_var, has_on_setattr)
return "_inst_dict['%s'] = %s" % (attr_name, value_var)
def fmt_setter_with_converter(
attr_name, value_var, has_on_setattr
):
if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
return _setattr_with_converter(
attr_name, value_var, has_on_setattr
)
return "_inst_dict['%s'] = %s(%s)" % (
attr_name,
_init_converter_pat % (attr_name,),
value_var,
)
else:
# Not frozen.
fmt_setter = _assign
fmt_setter_with_converter = _assign_with_converter
args = []
kw_only_args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
annotations = {"return": None}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
has_on_setattr = a.on_setattr is not None or (
a.on_setattr is not setters.NO_OP and has_cls_on_setattr
)
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name,
init_factory_name + "(%s)" % (maybe_self,),
has_on_setattr,
)
)
conv_name = _init_converter_pat % (a.name,)
names_for_globals[conv_name] = a.converter
else:
lines.append(
fmt_setter(
attr_name,
init_factory_name + "(%s)" % (maybe_self,),
has_on_setattr,
)
)
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name,
"attr_dict['%s'].default" % (attr_name,),
has_on_setattr,
)
)
conv_name = _init_converter_pat % (a.name,)
names_for_globals[conv_name] = a.converter
else:
lines.append(
fmt_setter(
attr_name,
"attr_dict['%s'].default" % (attr_name,),
has_on_setattr,
)
)
elif a.default is not NOTHING and not has_factory:
arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
if a.kw_only:
kw_only_args.append(arg)
else:
args.append(arg)
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name, arg_name, has_on_setattr
)
)
names_for_globals[
_init_converter_pat % (a.name,)
] = a.converter
else:
lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
elif has_factory:
arg = "%s=NOTHING" % (arg_name,)
if a.kw_only:
kw_only_args.append(arg)
else:
args.append(arg)
lines.append("if %s is not NOTHING:" % (arg_name,))
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(
" "
+ fmt_setter_with_converter(
attr_name, arg_name, has_on_setattr
)
)
lines.append("else:")
lines.append(
" "
+ fmt_setter_with_converter(
attr_name,
init_factory_name + "(" + maybe_self + ")",
has_on_setattr,
)
)
names_for_globals[
_init_converter_pat % (a.name,)
] = a.converter
else:
lines.append(
" " + fmt_setter(attr_name, arg_name, has_on_setattr)
)
lines.append("else:")
lines.append(
" "
+ fmt_setter(
attr_name,
init_factory_name + "(" + maybe_self + ")",
has_on_setattr,
)
)
names_for_globals[init_factory_name] = a.default.factory
else:
if a.kw_only:
kw_only_args.append(arg_name)
else:
args.append(arg_name)
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name, arg_name, has_on_setattr
)
)
names_for_globals[
_init_converter_pat % (a.name,)
] = a.converter
else:
lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
if a.init is True:
if a.type is not None and a.converter is None:
annotations[arg_name] = a.type
elif a.converter is not None and not PY2:
# Try to get the type from the converter.
sig = None
try:
sig = inspect.signature(a.converter)
except (ValueError, TypeError): # inspect failed
pass
if sig:
sig_params = list(sig.parameters.values())
if (
sig_params
and sig_params[0].annotation
is not inspect.Parameter.empty
):
annotations[arg_name] = sig_params[0].annotation
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_" + a.name
attr_name = "__attr_" + a.name
lines.append(
" %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
)
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
# because this is set only after __attrs_post_init is called, a crash
# will result if post-init tries to access the hash code. This seemed
# preferable to setting this beforehand, in which case alteration to
# field values during post-init combined with post-init accessing the
# hash code would result in silent bugs.
if cache_hash:
if frozen:
if slots:
# if frozen and slots, then _setattr defined above
init_hash_cache = "_setattr('%s', %s)"
else:
# if frozen and not slots, then _inst_dict defined above
init_hash_cache = "_inst_dict['%s'] = %s"
else:
init_hash_cache = "self.%s = %s"
lines.append(init_hash_cache % (_hash_cache_field, "None"))
# For exceptions we rely on BaseException.__init__ for proper
# initialization.
if is_exc:
vals = ",".join("self." + a.name for a in attrs if a.init)
lines.append("BaseException.__init__(self, %s)" % (vals,))
args = ", ".join(args)
if kw_only_args:
if PY2:
lines = _unpack_kw_only_lines_py2(kw_only_args) + lines
args += "%s**_kw_only" % (", " if args else "",) # leading comma
else:
args += "%s*, %s" % (
", " if args else "", # leading comma
", ".join(kw_only_args), # kw_only args
)
return (
"""\
def {init_name}(self, {args}):
{lines}
""".format(
init_name=("__attrs_init__" if attrs_init else "__init__"),
args=args,
lines="\n ".join(lines) if lines else "pass",
),
names_for_globals,
annotations,
)
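# A rough sketch of the generated source for a plain class with a required
# field "x" and a field "y" with a simple default and no converter:
#
#   def __init__(self, x, y=attr_dict['y'].default):
#       self.x = x
#       self.y = y
#
# attr_dict, converters, factories and validators are injected through the
# returned globals dict rather than being looked up at call time.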
class Attribute(object):
"""
*Read-only* representation of an attribute.
Instances of this class are frequently used for introspection purposes
like:
- `fields` returns a tuple of them.
- Validators get them passed as the first argument.
- The *field transformer* hook receives a list of them.
:attribute name: The name of the attribute.
:attribute inherited: Whether or not that attribute has been inherited from
a base class.
Plus *all* arguments of `attr.ib` (except for ``factory``
    which is only syntactic sugar for ``default=Factory(...)``).
.. versionadded:: 20.1.0 *inherited*
.. versionadded:: 20.1.0 *on_setattr*
.. versionchanged:: 20.2.0 *inherited* is not taken into account for
equality checks and hashing anymore.
.. versionadded:: 21.1.0 *eq_key* and *order_key*
For the full version history of the fields, see `attr.ib`.
"""
__slots__ = (
"name",
"default",
"validator",
"repr",
"eq",
"eq_key",
"order",
"order_key",
"hash",
"init",
"metadata",
"type",
"converter",
"kw_only",
"inherited",
"on_setattr",
)
def __init__(
self,
name,
default,
validator,
repr,
cmp, # XXX: unused, remove along with other cmp code.
hash,
init,
inherited,
metadata=None,
type=None,
converter=None,
kw_only=False,
eq=None,
eq_key=None,
order=None,
order_key=None,
on_setattr=None,
):
eq, eq_key, order, order_key = _determine_attrib_eq_order(
cmp, eq_key or eq, order_key or order, True
)
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("eq", eq)
bound_setattr("eq_key", eq_key)
bound_setattr("order", order)
bound_setattr("order_key", order_key)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr(
"metadata",
(
metadata_proxy(metadata)
if metadata
else _empty_metadata_singleton
),
)
bound_setattr("type", type)
bound_setattr("kw_only", kw_only)
bound_setattr("inherited", inherited)
bound_setattr("on_setattr", on_setattr)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k in Attribute.__slots__
if k
not in (
"name",
"validator",
"default",
"type",
"inherited",
) # exclude methods and deprecated alias
}
return cls(
name=name,
validator=ca._validator,
default=ca._default,
type=type,
cmp=None,
inherited=False,
**inst_dict
)
@property
def cmp(self):
"""
Simulate the presence of a cmp attribute and warn.
"""
warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.eq and self.order
# Don't use attr.evolve since fields(Attribute) doesn't work
def evolve(self, **changes):
"""
Copy *self* and apply *changes*.
This works similarly to `attr.evolve` but that function does not work
with ``Attribute``.
It is mainly meant to be used for `transform-fields`.
.. versionadded:: 20.3.0
"""
new = copy.copy(self)
new._setattrs(changes.items())
return new
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(
getattr(self, name) if name != "metadata" else dict(self.metadata)
for name in self.__slots__
)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
self._setattrs(zip(self.__slots__, state))
def _setattrs(self, name_values_pairs):
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in name_values_pairs:
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(
name,
metadata_proxy(value)
if value
else _empty_metadata_singleton,
)
_a = [
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
eq=True,
order=False,
hash=(name != "metadata"),
init=True,
inherited=False,
)
for name in Attribute.__slots__
]
Attribute = _add_hash(
_add_eq(
_add_repr(Attribute, attrs=_a),
attrs=[a for a in _a if a.name != "inherited"],
),
attrs=[a for a in _a if a.hash and a.name != "inherited"],
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into one is most
likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = (
"counter",
"_default",
"repr",
"eq",
"eq_key",
"order",
"order_key",
"hash",
"init",
"metadata",
"_validator",
"converter",
"type",
"kw_only",
"on_setattr",
)
__attrs_attrs__ = tuple(
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
hash=True,
init=True,
kw_only=False,
eq=True,
eq_key=None,
order=False,
order_key=None,
inherited=False,
on_setattr=None,
)
for name in (
"counter",
"_default",
"repr",
"eq",
"order",
"hash",
"init",
"on_setattr",
)
) + (
Attribute(
name="metadata",
default=None,
validator=None,
repr=True,
cmp=None,
hash=False,
init=True,
kw_only=False,
eq=True,
eq_key=None,
order=False,
order_key=None,
inherited=False,
on_setattr=None,
),
)
cls_counter = 0
def __init__(
self,
default,
validator,
repr,
cmp,
hash,
init,
converter,
metadata,
type,
kw_only,
eq,
eq_key,
order,
order_key,
on_setattr,
):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
self._validator = validator
self.converter = converter
self.repr = repr
self.eq = eq
self.eq_key = eq_key
self.order = order
self.order_key = order_key
self.hash = hash
self.init = init
self.metadata = metadata
self.type = type
self.kw_only = kw_only
self.on_setattr = on_setattr
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_eq(_add_repr(_CountingAttr))
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to `attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
__slots__ = ("factory", "takes_self")
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
for name, value in zip(self.__slots__, state):
setattr(self, name, value)
_f = [
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
eq=True,
order=False,
hash=True,
init=True,
inherited=False,
)
for name in Factory.__slots__
]
Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
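# A rough usage sketch (illustrative only; `attrs`/`attrib` as defined in
# this module):
#
#   >>> @attrs
#   ... class C(object):
#   ...     xs = attrib(default=Factory(list))
#   ...     doubled = attrib(
#   ...         default=Factory(lambda self: self.xs * 2, takes_self=True))
#   >>> C().xs
#   []
#   >>> C(xs=[1]).doubled
#   [1, 1]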
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param str name: The name for the new class.
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: `list` or `dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to `attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
pre_init = cls_dict.pop("__attrs_pre_init__", None)
post_init = cls_dict.pop("__attrs_post_init__", None)
user_init = cls_dict.pop("__init__", None)
body = {}
if pre_init is not None:
body["__attrs_pre_init__"] = pre_init
if post_init is not None:
body["__attrs_post_init__"] = post_init
if user_init is not None:
body["__init__"] = user_init
type_ = new_class(name, bases, {}, lambda ns: ns.update(body))
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__"
)
except (AttributeError, ValueError):
pass
# We do it here for proper warnings with meaningful stacklevel.
cmp = attributes_arguments.pop("cmp", None)
(
attributes_arguments["eq"],
attributes_arguments["order"],
) = _determine_attrs_eq_order(
cmp,
attributes_arguments.get("eq"),
attributes_arguments.get("order"),
True,
)
return _attrs(these=cls_dict, **attributes_arguments)(type_)
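# A rough usage sketch (illustrative only):
#
#   >>> C = make_class("C", ["x", "y"])
#   >>> C(1, 2)
#   C(x=1, y=2)
#   >>> P = make_class("P", {"x": attrib(default=0)}, frozen=True)
#   >>> P()
#   P(x=0)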
# These are required within this module so we define them here and merely
# import into .validators / .converters.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param callables validators: Arbitrary number of validators.
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators
if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
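# A rough usage sketch (illustrative only; the two validator callables are
# hypothetical stand-ins):
#
#   >>> def positive(inst, attr, value):
#   ...     if value <= 0:
#   ...         raise ValueError("%s must be positive" % attr.name)
#   >>> def small(inst, attr, value):
#   ...     if value >= 100:
#   ...         raise ValueError("%s must be < 100" % attr.name)
#   >>> @attrs
#   ... class C(object):
#   ...     x = attrib(validator=and_(positive, small))
#   >>> C(5)
#   C(x=5)
#   >>> C(500)
#   Traceback (most recent call last):
#       ...
#   ValueError: x must be < 100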
def pipe(*converters):
"""
A converter that composes multiple converters into one.
When called on a value, it runs all wrapped converters, returning the
*last* value.
    Type annotations will be inferred from the wrapped converters'
    annotations, if they have any.
:param callables converters: Arbitrary number of converters.
.. versionadded:: 20.1.0
"""
def pipe_converter(val):
for converter in converters:
val = converter(val)
return val
if not PY2:
if not converters:
# If the converter list is empty, pipe_converter is the identity.
A = typing.TypeVar("A")
pipe_converter.__annotations__ = {"val": A, "return": A}
else:
# Get parameter type.
sig = None
try:
sig = inspect.signature(converters[0])
except (ValueError, TypeError): # inspect failed
pass
if sig:
params = list(sig.parameters.values())
if (
params
and params[0].annotation is not inspect.Parameter.empty
):
pipe_converter.__annotations__["val"] = params[
0
].annotation
# Get return type.
sig = None
try:
sig = inspect.signature(converters[-1])
except (ValueError, TypeError): # inspect failed
pass
if sig and sig.return_annotation is not inspect.Signature().empty:
pipe_converter.__annotations__[
"return"
] = sig.return_annotation
return pipe_converter
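# A rough usage sketch (illustrative only), typically passed to
# attrib(converter=...):
#
#   >>> to_stripped_int = pipe(str.strip, int)
#   >>> to_stripped_int(" 42 ")
#   42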
# --- end of source file: python-attrs/attrs, src/attr/_make.py (MIT license) ---
from __future__ import absolute_import, division, print_function
import copy
import json
import os.path
import shutil
import tempfile
import yaml
from appr.discovery import ishosted, split_package_name
from appr.formats.appr.manifest_jsonnet import ManifestJsonnet
from appr.formats.base import FormatBase
from appr.utils import convert_utf8, mkdir_p, parse_version_req
from appr.platforms.kubernetes import ANNOTATIONS
class KubBase(FormatBase):
media_type = "kpm"
target = "kubernetes"
def __init__(self, name, version=None, variables=None, shards=None, namespace=None,
endpoint=None, resources=None, ssl_verify=True, **kwargs):
super(KubBase, self).__init__(name, version=version, endpoint=endpoint,
ssl_verify=ssl_verify, **kwargs)
if shards.__class__ in [str, unicode]:
shards = json.loads(shards)
if variables is None:
variables = {}
self.endpoint = endpoint
self._dependencies = None
self._resources = None
self._deploy_name = name
self._deploy_shards = shards
self._deploy_resources = resources
self._package = None
self._manifest = None
self.namespace = namespace
if self.namespace:
variables["namespace"] = self.namespace
self._deploy_vars = variables
self._variables = None
self.tla_codes = {"variables": self._deploy_vars}
if shards is not None:
self.tla_codes["shards"] = shards
def create_kub_resources(self, resources):
r = []
for resource in resources:
name = resource['metadata']['name']
kind = resource['kind'].lower()
protected = resource.get('annotations', {}).get(ANNOTATIONS['protected'], False)
r.append({
"file": "%s-%s.yaml" % (name, kind),
"name": name,
"generated": True,
"order": -1,
"protected": protected,
"value": resource,
"patch": [],
"variables": {},
"type": kind})
return r
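    # A rough sketch of the shape produced for a single resource such as
    #   {"kind": "Service", "metadata": {"name": "web"}}
    # (no annotations, so "protected" defaults to False):
    #   {"file": "web-service.yaml", "name": "web", "generated": True,
    #    "order": -1, "protected": False, "value": {...}, "patch": [],
    #    "variables": {}, "type": "service"}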
def _create_manifest(self):
return ManifestJsonnet(self.package, {"params": json.dumps(self.tla_codes)})
@property
def author(self):
return self.manifest.package['author']
@property
def version(self):
return self.manifest.package['version']
@property
def description(self):
return self.manifest.package['description']
@property
def name(self):
return self.manifest.package['name']
@property
def variables(self):
if self._variables is None:
self._variables = copy.deepcopy(self.manifest.variables)
self._variables.update(self._deploy_vars)
return self._variables
@property
def kubClass(self):
raise NotImplementedError
def _fetch_deps(self):
self._dependencies = []
for dep in self.manifest.deploy:
if dep['name'] != '$self':
# if the parent app has discovery but not child,
# use the same domain to the child
if ishosted(self._deploy_name) and not ishosted(dep['name']):
dep['name'] = "%s/%s" % (split_package_name(self._deploy_name)[0], dep['name'])
variables = dep.get('variables', {})
variables['kpmparent'] = {
'name': self.name,
'shards': self.shards,
'variables': self.variables}
kub = self.kubClass(dep['name'], endpoint=self.endpoint,
version=parse_version_req(dep.get('version', None)),
variables=variables, resources=dep.get('resources', None),
shards=dep.get('shards', None), namespace=self.namespace)
self._dependencies.append(kub)
else:
self._dependencies.append(self)
if not self._dependencies:
self._dependencies.append(self)
@property
def dependencies(self):
if self._dependencies is None:
self._fetch_deps()
return self._dependencies
def resources(self):
if self._resources is None:
self._resources = self.manifest.resources
return self._resources
@property
def shards(self):
shards = self.manifest.shards
if self._deploy_shards is not None and len(self._deploy_shards):
shards = self._deploy_shards
return shards
def build_tar(self, dest="/tmp"):
package_json = self.build()
tempdir = tempfile.mkdtemp()
dest = os.path.join(tempdir, self.manifest.package_name())
mkdir_p(dest)
index = 0
for kub in self.dependencies:
index = kub.prepare_resources(dest, index)
with open(os.path.join(dest, ".package.json"), mode="w") as f:
f.write(json.dumps(package_json))
tar = self.make_tarfile(dest)
tar.flush()
tar.seek(0)
shutil.rmtree(tempdir)
return tar.read()
def prepare_resources(self, dest="/tmp", index=0):
for resource in self.resources():
index += 1
path = os.path.join(dest, "%02d_%s_%s" % (index, self.version, resource['file']))
f = open(path, 'w')
f.write(yaml.safe_dump(convert_utf8(resource['value'])))
resource['filepath'] = f.name
f.close()
return index
def build(self):
raise NotImplementedError
def convert_to(self):
raise NotImplementedError
def deploy(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def status(self):
raise NotImplementedError
# --- end of source file: cn-app-registry/cnr-server, appr/formats/appr/kub_base.py (Apache-2.0 license) ---
from __future__ import absolute_import, division, print_function
import copy
import json
import string
import workflows
try: # Python3 compatibility
basestring = basestring
except NameError:
basestring = (str, bytes)
class Recipe(object):
'''Object containing a processing recipe that can be passed to services.
A recipe describes how all involved services are connected together, how
data should be passed and how errors should be handled.'''
recipe = {}
'''The processing recipe is encoded in this dictionary.'''
# TODO: Describe format
def __init__(self, recipe=None):
'''Constructor allows passing in a recipe dictionary.'''
if isinstance(recipe, basestring):
self.recipe = self.deserialize(recipe)
elif recipe:
self.recipe = self._sanitize(recipe)
def deserialize(self, string):
'''Convert a recipe that has been stored as serialized json string to a
data structure.'''
return self._sanitize(json.loads(string))
@staticmethod
def _sanitize(recipe):
'''Clean up a recipe that may have been stored as serialized json string.
Convert any numerical pointers that are stored as strings to integers.'''
recipe = recipe.copy()
for k in list(recipe):
if k not in ('start', 'error') and int(k) and k != int(k):
recipe[int(k)] = recipe[k]
del(recipe[k])
for k in list(recipe):
if 'output' in recipe[k] and not isinstance(recipe[k]['output'], (list, dict)):
recipe[k]['output'] = [ recipe[k]['output'] ]
# dicts should be normalized, too
if 'start' in recipe:
recipe['start'] = [ tuple(x) for x in recipe['start'] ]
return recipe
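  # A rough sketch of the normalization performed above:
  #   {"start": [[1, "payload"]], "1": {"service": "x", "output": 2}}
  # becomes
  #   {"start": [(1, "payload")], 1: {"service": "x", "output": [2]}}
  # i.e. numeric keys that arrived as JSON strings become ints, bare output
  # links become one-element lists, and 'start' entries become tuples.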
def serialize(self):
'''Write out the current recipe as serialized json string.'''
return json.dumps(self.recipe)
def pretty(self):
'''Write out the current recipe as serialized json string with pretty formatting.'''
return json.dumps(self.recipe, indent=2)
def __getitem__(self, item):
'''Allow direct dictionary access to recipe elements.'''
return self.recipe.__getitem__(item)
def __contains__(self, item):
'''Testing for presence of recipe elements.'''
return item in self.recipe
def __eq__(self, other):
'''Overload equality operator (!=) to allow comparing recipe objects
with one another and with their string representations.'''
if isinstance(other, Recipe):
return self.recipe == other.recipe
if isinstance(other, dict):
return self.recipe == self._sanitize(other)
return self.recipe == self.deserialize(other)
def __ne__(self, other):
'''Overload inequality operator (!=) to allow comparing recipe objects
with one another and with their string representations.'''
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
'''Recipe objects are mutable and therefore should not be hashable.'''
return None
def validate(self):
'''Check whether the encoded recipe is valid. It must describe a directed
acyclical graph, all connections must be defined, etc.'''
if not self.recipe:
raise workflows.Error('Invalid recipe: No recipe defined')
# Without a 'start' node nothing would happen
if 'start' not in self.recipe:
raise workflows.Error('Invalid recipe: "start" node missing')
if not self.recipe['start']:
raise workflows.Error('Invalid recipe: "start" node empty')
if not all(isinstance(x, (list, tuple)) and len(x) == 2
for x in self.recipe['start']):
raise workflows.Error('Invalid recipe: "start" node invalid')
if any(x[0] == 'start' for x in self.recipe['start']):
raise workflows.Error('Invalid recipe: "start" node points to itself')
# Check that 'error' node points to regular nodes only
if 'error' in self.recipe and \
isinstance(self.recipe['error'], (list, tuple, basestring)):
if 'start' in self.recipe['error']:
raise workflows.Error('Invalid recipe: "error" node points to "start" node')
if 'error' in self.recipe['error']:
raise workflows.Error('Invalid recipe: "error" node points to itself')
# All other nodes must be numeric
nodes = list(filter(lambda x: not isinstance(x, int)
and x not in ('start', 'error'),
self.recipe))
if nodes:
raise workflows.Error('Invalid recipe: Node "%s" is not numeric' % nodes[0])
# Detect cycles
touched_nodes = set(['start', 'error'])
def flatten_links(struct):
'''Take an output/error link object, list or dictionary and return flat list of linked nodes.'''
if struct is None: return []
if isinstance(struct, int): return [ struct ]
if isinstance(struct, list):
if not all(isinstance(x, int) for x in struct):
raise workflows.Error('Invalid recipe: Invalid link in recipe (%s)' % str(struct))
return struct
if isinstance(struct, dict):
joined_list = []
for sub_list in struct.values():
joined_list += flatten_links(sub_list)
return joined_list
raise workflows.Error('Invalid recipe: Invalid link in recipe (%s)' % str(struct))
def find_cycles(path):
'''Depth-First-Search helper function to identify cycles.'''
if path[-1] not in self.recipe:
raise workflows.Error('Invalid recipe: Node "%s" is referenced via "%s" but missing' % (str(path[-1]), str(path[:-1])))
touched_nodes.add(path[-1])
node = self.recipe[path[-1]]
for outgoing in ('output', 'error'):
if outgoing in node:
references = flatten_links(node[outgoing])
for n in references:
if n in path:
raise workflows.Error('Invalid recipe: Recipe contains cycle (%s -> %s)' % (str(path), str(n)))
find_cycles(path + [n])
for link in self.recipe['start']:
find_cycles(['start', link[0]])
if 'error' in self.recipe:
if isinstance(self.recipe['error'], (list, tuple)):
for link in self.recipe['error']:
find_cycles(['error', link])
else:
find_cycles(['error', self.recipe['error']])
# Test recipe for unreferenced nodes
for node in self.recipe:
if node not in touched_nodes:
raise workflows.Error('Invalid recipe: Recipe contains unreferenced node "%s"' % str(node))
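  # Illustrative sketch (not part of the original module): a recipe that
  # validate() rejects because its two nodes reference each other; the node
  # payloads are hypothetical.
  #   Recipe({1: {'service': 'A', 'output': 2},
  #           2: {'service': 'B', 'output': 1},
  #           'start': [(1, {})]}).validate()
  #   # raises workflows.Error: Invalid recipe: Recipe contains cycle (...)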
def apply_parameters(self, parameters):
'''Recursively apply dictionary entries in 'parameters' to {item}s in recipe
structure, leaving undefined {item}s as they are. A special case is a
{$REPLACE:item}, which replaces the string with a copy of the referenced
parameter item.
       Examples (the recipe structure held in self.recipe is modified in place):
         parameters = { 'x':'5' }
         { '{x}': '{y}' }   =>   { '5': '{y}' }
         parameters = { 'y':'5' }
         { '{x}': '{y}' }   =>   { '{x}': '5' }
         parameters = { 'x':'3', 'y':'5' }
         { '{x}': '{y}' }   =>   { '3': '5' }
         parameters = { 'l': [ 1, 2 ] }
         { 'x': '{$REPLACE:l}' }   =>   { 'x': [ 1, 2 ] }
'''
class SafeString(object):
def __init__(self, s):
self.string = s
def __repr__(self):
return '{' + self.string + '}'
def __str__(self):
return '{' + self.string + '}'
def __getitem__(self, item):
return SafeString(self.string + '[' + item + ']')
class SafeDict(dict):
'''A dictionary that returns undefined keys as {keyname}.
         This can be used to selectively replace variables in data structures.'''
def __missing__(self, key):
return SafeString(key)
# By default the python formatter class is used to resolve {item} references
formatter = string.Formatter()
# Special format strings "{$REPLACE:(...)}" use this data structure
# formatter to return the referenced data structure rather than a formatted
# string.
ds_formatter = string.Formatter()
def ds_format_field(value, spec):
ds_format_field.last = value
return ''
ds_formatter.format_field = ds_format_field
params = SafeDict(parameters)
def _recursive_apply(item):
'''Helper function to recursively apply replacements.'''
if isinstance(item, basestring):
if item.startswith('{$REPLACE') and item.endswith('}'):
try:
ds_formatter.vformat("{" + item[10:-1] + "}", (), parameters)
except KeyError:
return None
return copy.deepcopy(ds_formatter.format_field.last)
else:
return formatter.vformat(item, (), params)
if isinstance(item, dict):
return { _recursive_apply(key): _recursive_apply(value) for
key, value in item.items() }
if isinstance(item, tuple):
return tuple(_recursive_apply(list(item)))
if isinstance(item, list):
return [ _recursive_apply(x) for x in item ]
return item
self.recipe = _recursive_apply(self.recipe)
def merge(self, other):
'''Merge two recipes together, returning a single recipe containing all
nodes.
Note: This does NOT yet return a minimal recipe.
:param other: A Recipe object that should be merged with the current
Recipe object.
:return: A new Recipe object containing information from both recipes.
'''
# Merging empty values returns a copy of the original
if not other:
return Recipe(self.recipe)
# When a string is passed, merge with a constructed recipe object
if isinstance(other, basestring):
return self.merge(Recipe(other))
# Merging empty recipes returns a copy of the original
if not other.recipe:
return Recipe(self.recipe)
# If own recipe empty, use other recipe
if not self.recipe:
return Recipe(other.recipe)
# Assuming both recipes are valid
self.validate()
other.validate()
# Start from current recipe
new_recipe = self.recipe
# Find the maximum index of the current recipe
max_index = max(1, *filter(lambda x:isinstance(x, int), self.recipe.keys()))
next_index = max_index + 1
# Set up a translation table for indices and copy all entries
translation = {}
for key, value in other.recipe.items():
if isinstance(key, int):
if key not in translation:
translation[key] = next_index
next_index = next_index + 1
new_recipe[translation[key]] = value
# Rewrite all copied entries to point to new keys
def translate(x):
if isinstance(x, list):
return list(map(translate, x))
elif isinstance(x, tuple):
return tuple(map(translate, x))
elif isinstance(x, dict):
return { k: translate(v) for k, v in x.items() }
else:
return translation[x]
for idx in translation.values():
if 'output' in new_recipe[idx]:
new_recipe[idx]['output'] = translate(new_recipe[idx]['output'])
if 'error' in new_recipe[idx]:
new_recipe[idx]['error'] = translate(new_recipe[idx]['error'])
# Join 'start' nodes
for (idx, param) in other.recipe['start']:
new_recipe['start'].append((translate(idx), param))
# Join 'error' nodes
if 'error' in other.recipe:
if 'error' not in new_recipe:
new_recipe['error'] = translate(other.recipe['error'])
else:
if isinstance(new_recipe['error'], (list, tuple)):
new_recipe['error'] = list(new_recipe['error'])
else:
new_recipe['error'] = list([new_recipe['error']])
if isinstance(other.recipe['error'], (list, tuple)):
new_recipe['error'].extend(translate(other.recipe['error']))
else:
new_recipe['error'].append(translate(other.recipe['error']))
# # Minimize DAG
# queuehash, topichash = {}, {}
# for k, v in new_recipe.items():
# if isinstance(v, dict):
# if 'queue' in v:
# queuehash[v['queue']] = queuehash.get(v['queue'], [])
# queuehash[v['queue']].append(k)
# if 'topic' in v:
# topichash[v['topic']] = topichash.get(v['topic'], [])
# topichash[v['topic']].append(k)
#
# print queuehash
# print topichash
return Recipe(new_recipe)
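  # Hedged usage sketch (not part of the original module): constructing,
  # serialising and merging recipes with the methods defined above. Node
  # payloads are hypothetical and error handling is omitted.
  #   a = Recipe({1: {'service': 'A', 'output': 2},
  #               2: {'service': 'B'},
  #               'start': [(1, {'param': 'value'})]})
  #   a.validate()              # raises workflows.Error for an invalid DAG
  #   text = a.serialize()      # JSON string representation
  #   b = Recipe(text)          # round-trips to an equal Recipe
  #   assert a == b
  #   merged = a.merge(b)       # b's node indices are renumbered on merge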
| {
"repo_name": "xia2/workflows",
"path": "workflows/recipe/recipe.py",
"copies": "1",
"size": "12389",
"license": "bsd-3-clause",
"hash": -8677705866911175000,
"line_mean": 36.0928143713,
"line_max": 127,
"alpha_frac": 0.619985471,
"autogenerated": false,
"ratio": 4.0407697325505545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160755203550554,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
import linecache
import sys
import threading
import uuid
import warnings
from operator import itemgetter
from . import _config
from ._compat import (
PY2,
isclass,
iteritems,
metadata_proxy,
ordered_dict,
set_closure_cell,
)
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
PythonTooOldError,
UnannotatedAttributeError,
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = (
" {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
)
_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar")
# we don't use a double-underscore prefix because that triggers
# name mangling when trying to create a slot for the field
# (when slots=True)
_hash_cache_field = "_attrs_cached_hash"
_empty_metadata_singleton = metadata_proxy({})
# Unique object for unequivocal getattr() defaults.
_sentinel = object()
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
``_Nothing`` is a singleton. There is only ever one of it.
"""
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super(_Nothing, cls).__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(
default=NOTHING,
validator=None,
repr=True,
cmp=None,
hash=None,
init=True,
metadata=None,
type=None,
converter=None,
factory=None,
kw_only=False,
eq=None,
order=None,
):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of `Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a `TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value
:param callable factory: Syntactic sugar for
``default=attr.Factory(callable)``.
:param validator: `callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the `Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\\ s.
:param repr: Include this attribute in the generated ``__repr__``
method. If ``True``, include the attribute; if ``False``, omit it. By
default, the built-in ``repr()`` function is used. To override how the
attribute value is formatted, pass a ``callable`` that takes a single
value and returns a string. Note that the resulting string is used
as-is, i.e. it will be used directly *instead* of calling ``repr()``
(the default).
:type repr: a ``bool`` or a ``callable`` to use a custom function.
:param bool eq: If ``True`` (default), include this attribute in the
generated ``__eq__`` and ``__ne__`` methods that check two instances
for equality.
:param bool order: If ``True`` (default), include this attributes in the
generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
:param bool cmp: Setting to ``True`` is equivalent to setting ``eq=True,
order=True``. Deprecated in favor of *eq* and *order*.
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *eq*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
anything else than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
        value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable converter: `callable` that is called by
        ``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See `extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
Please note that ``attrs`` doesn't do anything with this metadata by
itself. You can use it as part of your own code or for
`static type checking <types>`.
:param kw_only: Make this attribute keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *eq* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
.. versionadded:: 18.1.0
``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
.. versionadded:: 18.2.0 *kw_only*
.. versionchanged:: 19.2.0 *convert* keyword argument removed
.. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
"""
eq, order = _determine_eq_order(cmp, eq, order)
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if factory is not None:
if default is not NOTHING:
raise ValueError(
"The `default` and `factory` arguments are mutually "
"exclusive."
)
if not callable(factory):
raise ValueError("The `factory` argument must be a callable.")
default = Factory(factory)
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=None,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
kw_only=kw_only,
eq=eq,
order=order,
)
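# Hedged usage sketch (not part of the original module): attrib() is normally
# used through its public aliases attr.ib / attr.attrib together with the
# attrs class decorator defined further below; the field names here are
# hypothetical.
#
#   import attr
#
#   @attr.s
#   class Point(object):
#       x = attr.ib()
#       y = attr.ib(default=0, validator=attr.validators.instance_of(int))
#
#   Point(1)        # Point(x=1, y=0)
#   Point(1, y=2)   # Point(x=1, y=2)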
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(
_tuple_property_pat.format(index=i, attr_name=attr_name)
)
else:
attr_class_template.append(" pass")
globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
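# Illustrative sketch (not part of the original module): the generated class
# behaves like a plain tuple with named accessors, e.g. for
# _make_attr_tuple_class("C", ["x", "y"]) roughly
#
#   class CAttributes(tuple):
#       __slots__ = ()
#       x = property(itemgetter(0))
#       y = property(itemgetter(1))
#
# so instances support both t[0] and t.x.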
# Tuple class for extracted attributes from a class definition.
# `base_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class(
"_Attributes",
[
# all attributes to build dunder methods for
"attrs",
# attributes that have been inherited
"base_attrs",
# map inherited attributes to their originating classes
"base_attrs_map",
],
)
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The string comparison hack is used to avoid evaluating all string
annotations which would put attrs-based classes at a performance
disadvantage compared to plain old classes.
"""
return str(annot).startswith(_classvar_prefixes)
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for base_cls in cls.__mro__[1:]:
if anns is getattr(base_cls, "__annotations__", None):
return {}
return anns
def _counter_getter(e):
"""
Key function for sorting to avoid re-creating a lambda for every class.
"""
return e[1].counter
def _transform_attrs(cls, these, auto_attribs, kw_only):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = [(name, ca) for name, ca in iteritems(these)]
if not isinstance(these, ordered_dict):
ca_list.sort(key=_counter_getter)
elif auto_attribs is True:
ca_names = {
name
for name, attr in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: "
+ ", ".join(
sorted(unannotated, key=lambda n: cd.get(n).counter)
)
+ "."
)
else:
ca_list = sorted(
(
(name, attr)
for name, attr in cd.items()
if isinstance(attr, _CountingAttr)
),
key=lambda e: e[1].counter,
)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name, ca=ca, type=anns.get(attr_name)
)
for attr_name, ca in ca_list
]
base_attrs = []
base_attr_map = {} # A dictionary of base attrs to their classes.
taken_attr_names = {a.name: a for a in own_attrs}
# Traverse the MRO and collect attributes.
for base_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(base_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
for a in sub_attrs:
prev_a = taken_attr_names.get(a.name)
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if prev_a is None:
base_attrs.append(a)
taken_attr_names[a.name] = a
base_attr_map[a.name] = base_cls
attr_names = [a.name for a in base_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
if kw_only:
own_attrs = [a._assoc(kw_only=True) for a in own_attrs]
base_attrs = [a._assoc(kw_only=True) for a in base_attrs]
attrs = AttrsClass(base_attrs + own_attrs)
# Mandatory vs non-mandatory attr order only matters when they are part of
# the __init__ signature and when they aren't kw_only (which are moved to
# the end and can be mandatory or non-mandatory in any order, as they will
# be specified as keyword args anyway). Check the order of those attrs:
had_default = False
for a in (a for a in attrs if a.init is not False and a.kw_only is False):
if had_default is True and a.default is NOTHING:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: %r" % (a,)
)
if had_default is False and a.default is not NOTHING:
had_default = True
return _Attributes((attrs, base_attrs, base_attr_map))
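# Illustrative sketch (not part of the original module): the ordering check at
# the end of _transform_attrs means a mandatory field may not follow one with
# a default unless it is keyword-only; the fields below are hypothetical.
#
#   @attr.s
#   class C(object):
#       x = attr.ib(default=1)
#       y = attr.ib()
#
#   # applying @attr.s raises ValueError: No mandatory attributes allowed
#   # after an attribute with a default value or factory. ...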
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls",
"_cls_dict",
"_attrs",
"_base_names",
"_attr_names",
"_slots",
"_frozen",
"_weakref_slot",
"_cache_hash",
"_has_post_init",
"_delete_attribs",
"_base_attr_map",
"_is_exc",
)
def __init__(
self,
cls,
these,
slots,
frozen,
weakref_slot,
auto_attribs,
kw_only,
cache_hash,
is_exc,
):
attrs, base_attrs, base_map = _transform_attrs(
cls, these, auto_attribs, kw_only
)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._base_names = set(a.name for a in base_attrs)
self._base_attr_map = base_map
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_base_class(cls)
self._weakref_slot = weakref_slot
self._cache_hash = cache_hash
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._delete_attribs = not bool(these)
self._is_exc = is_exc
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
base_names = self._base_names
# Clean class of attribute definitions (`attr.ib()`s).
if self._delete_attribs:
for name in self._attr_names:
if (
name not in base_names
and getattr(cls, name, _sentinel) is not _sentinel
):
try:
delattr(cls, name)
except AttributeError:
# This can happen if a base class defines a class
# variable and we want to set an attribute with the
# same name by using only a type annotation.
pass
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
# Attach __setstate__. This is necessary to clear the hash code
# cache on deserialization. See issue
# https://github.com/python-attrs/attrs/issues/482 .
# Note that this code only handles setstate for dict classes.
# For slotted classes, see similar code in _create_slots_class .
if self._cache_hash:
existing_set_state_method = getattr(cls, "__setstate__", None)
if existing_set_state_method:
raise NotImplementedError(
"Currently you cannot use hash caching if "
"you specify your own __setstate__ method."
"See https://github.com/python-attrs/attrs/issues/494 ."
)
def cache_hash_set_state(chss_self, _):
# clear hash code cache
setattr(chss_self, _hash_cache_field, None)
setattr(cls, "__setstate__", cache_hash_set_state)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
base_names = self._base_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
}
weakref_inherited = False
# Traverse the MRO to check for an existing __weakref__.
for base_cls in self._cls.__mro__[1:-1]:
if "__weakref__" in getattr(base_cls, "__dict__", ()):
weakref_inherited = True
break
names = self._attr_names
if (
self._weakref_slot
and "__weakref__" not in getattr(self._cls, "__slots__", ())
and "__weakref__" not in names
and not weakref_inherited
):
names += ("__weakref__",)
# We only add the names of attributes that aren't inherited.
        # Setting __slots__ to inherited attributes wastes memory.
slot_names = [name for name in names if name not in base_names]
if self._cache_hash:
slot_names.append(_hash_cache_field)
cd["__slots__"] = tuple(slot_names)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
# __weakref__ is not writable.
state_attr_names = tuple(
an for an in self._attr_names if an != "__weakref__"
)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in state_attr_names)
hash_caching_enabled = self._cache_hash
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(state_attr_names, state):
__bound_setattr(name, value)
# Clearing the hash code cache on deserialization is needed
# because hash codes can change from run to run. See issue
# https://github.com/python-attrs/attrs/issues/482 .
# Note that this code only handles setstate for slotted classes.
# For dict classes, see similar code in _patch_original_class .
if hash_caching_enabled:
__bound_setattr(_hash_cache_field, None)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(
self._cls,
self._attrs,
frozen=self._frozen,
cache_hash=self._cache_hash,
)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._cls,
self._attrs,
self._has_post_init,
self._frozen,
self._slots,
self._cache_hash,
self._base_attr_map,
self._is_exc,
)
)
return self
def add_eq(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"] = (
self._add_method_dunders(meth)
for meth in _make_eq(self._cls, self._attrs)
)
return self
def add_order(self):
cd = self._cls_dict
cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_order(self._cls, self._attrs)
)
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__)
)
except AttributeError:
pass
return method
_CMP_DEPRECATION = (
"The usage of `cmp` is deprecated and will be removed on or after "
"2021-06-01. Please use `eq` and `order` instead."
)
def _determine_eq_order(cmp, eq, order):
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
values of eq and order.
"""
if cmp is not None and any((eq is not None, order is not None)):
raise ValueError("Don't mix `cmp` with `eq' and `order`.")
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=3)
return cmp, cmp
# If left None, equality is on and ordering mirrors equality.
if eq is None:
eq = True
if order is None:
order = eq
if eq is False and order is True:
raise ValueError("`order` can only be True if `eq` is True too.")
return eq, order
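# Illustrative sketch (not part of the original module) of the derivation
# performed above:
#
#   _determine_eq_order(cmp=None, eq=None, order=None)   # -> (True, True)
#   _determine_eq_order(cmp=None, eq=False, order=None)  # -> (False, False)
#   _determine_eq_order(cmp=True, eq=None, order=None)   # -> (True, True) + DeprecationWarning
#   _determine_eq_order(cmp=None, eq=False, order=True)  # raises ValueError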
def attrs(
maybe_cls=None,
these=None,
repr_ns=None,
repr=True,
cmp=None,
hash=None,
init=True,
slots=False,
frozen=False,
weakref_slot=True,
str=False,
auto_attribs=False,
kw_only=False,
cache_hash=False,
auto_exc=False,
eq=None,
order=None,
):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using `attr.ib` or the *these* argument.
:param these: A dictionary of name to `attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes and will *not* remove any attributes from it.
If *these* is an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the attributes inside *these*. Otherwise the order
of the definition of the attributes is used.
:type these: `dict` of `str` to `attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
`Exception`\ s.
:param bool eq: If ``True`` or ``None`` (default), add ``__eq__`` and
``__ne__`` methods that check two instances for equality.
They compare the instances as if they were tuples of their ``attrs``
attributes, but only iff the types of both classes are *identical*!
:type eq: `bool` or `None`
:param bool order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``,
and ``__ge__`` methods that behave like *eq* above and allow instances
to be ordered. If ``None`` (default) mirror value of *eq*.
:type order: `bool` or `None`
:param cmp: Setting to ``True`` is equivalent to setting ``eq=True,
order=True``. Deprecated in favor of *eq* and *order*, has precedence
over them for backward-compatibility though. Must not be mixed with
*eq* or *order*.
:type cmp: `bool` or `None`
:param hash: If ``None`` (default), the ``__hash__`` method is generated
according how *eq* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *eq* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the base class will be used (if base class is
           ``object``, this means it will fall back to id-based hashing).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See our documentation on `hashing`, Python's documentation on
`object.__hash__`, and the `GitHub issue that led to the default \
behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
details.
:type hash: ``bool`` or ``None``
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a `slotted class <slotted classes>` that's more
memory-efficient.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance `impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
:param bool weakref_slot: Make instances weak-referenceable. This has no
effect unless ``slots`` is also enabled.
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an `attr.ib` but lacks a type
annotation, an `attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value like if it were passed using
``attr.ib(default=42)``. Passing an instance of `Factory` also
works as expected.
Attributes annotated as `typing.ClassVar`, and attributes that are
neither annotated nor set to an `attr.ib` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
:param bool kw_only: Make all attributes keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
:param bool cache_hash: Ensure that the object's hash code is computed
only once and stored on the object. If this is set to ``True``,
hashing must be either explicitly or implicitly enabled for this
class. If the hash code is cached, avoid any reassignments of
fields involved in hash code computation or mutations of the objects
those fields point to after object creation. If such changes occur,
the behavior of the object's hash code is undefined.
:param bool auto_exc: If the class subclasses `BaseException`
(which implicitly includes any subclass of any exception), the
        following happens so the class behaves like a well-behaved Python
        exception class:
- the values for *eq*, *order*, and *hash* are ignored and the
instances compare and hash by the instance's ids (N.B. ``attrs`` will
*not* remove existing implementations of ``__hash__`` or the equality
methods. It just won't add own ones.),
- all attributes that are either passed into ``__init__`` or have a
default value are additionally available as a tuple in the ``args``
attribute,
- the value of *str* is ignored leaving ``__str__`` to base classes.
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*
.. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
.. versionchanged:: 17.1.0
*hash* supports ``None`` as value which is also the default now.
.. versionadded:: 17.3.0 *auto_attribs*
.. versionchanged:: 18.1.0
If *these* is passed, no attributes are deleted from the class body.
.. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
.. versionadded:: 18.2.0 *weakref_slot*
.. deprecated:: 18.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
`DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
to each other.
.. versionchanged:: 19.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
subclasses comparable anymore.
.. versionadded:: 18.2.0 *kw_only*
.. versionadded:: 18.2.0 *cache_hash*
.. versionadded:: 19.1.0 *auto_exc*
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
"""
eq, order = _determine_eq_order(cmp, eq, order)
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
is_exc = auto_exc is True and issubclass(cls, BaseException)
builder = _ClassBuilder(
cls,
these,
slots,
frozen,
weakref_slot,
auto_attribs,
kw_only,
cache_hash,
is_exc,
)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if eq is True and not is_exc:
builder.add_eq()
if order is True and not is_exc:
builder.add_order()
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and eq is False) or is_exc:
            # Don't do anything. Should fall back to object's __hash__
# which is by id.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
elif hash is True or (hash is None and eq is True and frozen is True):
# Build a __hash__ if told so, or if it's safe.
builder.add_hash()
else:
# Raise TypeError on attempts to hash.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
builder.make_unhashable()
if init is True:
builder.add_init()
else:
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" init must be True."
)
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
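# Hedged usage sketch (not part of the original module): typical use of the
# decorator defined above via its public alias attr.s; the class and field
# names are hypothetical.
#
#   import attr
#
#   @attr.s(frozen=True, slots=True)
#   class Coordinates(object):
#       lat = attr.ib()
#       lon = attr.ib()
#
#   c = Coordinates(48.8, 2.3)
#   c == Coordinates(48.8, 2.3)   # True, via the generated __eq__
#   hash(c)                       # works because eq=True and frozen=True
#   c.lat = 0.0                   # raises attr.exceptions.FrozenInstanceError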
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_base_class(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(cls.__setattr__, "__module__", None)
== _frozen_setattrs.__module__
and cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_base_class(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _generate_unique_filename(cls, func_name):
"""
Create a "filename" suitable for a function being generated.
"""
unique_id = uuid.uuid4()
extra = ""
count = 1
while True:
unique_filename = "<attrs generated {0} {1}.{2}{3}>".format(
func_name,
cls.__module__,
getattr(cls, "__qualname__", cls.__name__),
extra,
)
# To handle concurrency we essentially "reserve" our spot in
# the linecache with a dummy line. The caller can then
# set this value correctly.
cache_line = (1, None, (str(unique_id),), unique_filename)
if (
linecache.cache.setdefault(unique_filename, cache_line)
== cache_line
):
return unique_filename
# Looks like this spot is taken. Try again.
count += 1
extra = "-{0}".format(count)
def _make_hash(cls, attrs, frozen, cache_hash):
attrs = tuple(
a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
)
tab = " "
unique_filename = _generate_unique_filename(cls, "hash")
type_hash = hash(unique_filename)
method_lines = ["def __hash__(self):"]
def append_hash_computation_lines(prefix, indent):
"""
Generate the code for actually computing the hash code.
Below this will either be returned directly or used to compute
a value which is then cached, depending on the value of cache_hash
"""
method_lines.extend(
[indent + prefix + "hash((", indent + " %d," % (type_hash,)]
)
for a in attrs:
method_lines.append(indent + " self.%s," % a.name)
method_lines.append(indent + " ))")
if cache_hash:
method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
if frozen:
append_hash_computation_lines(
"object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
)
method_lines.append(tab * 2 + ")") # close __setattr__
else:
append_hash_computation_lines(
"self.%s = " % _hash_cache_field, tab * 2
)
method_lines.append(tab + "return self.%s" % _hash_cache_field)
else:
append_hash_computation_lines("return ", tab)
script = "\n".join(method_lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def _make_eq(cls, attrs):
attrs = [a for a in attrs if a.eq]
unique_filename = _generate_unique_filename(cls, "eq")
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
if attrs:
lines.append(" return (")
others = [" ) == ("]
for a in attrs:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__eq__"], __ne__
def _make_order(cls, attrs):
attrs = [a for a in attrs if a.order]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) < attrs_to_tuple(other)
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) <= attrs_to_tuple(other)
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) > attrs_to_tuple(other)
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) >= attrs_to_tuple(other)
return NotImplemented
return __lt__, __le__, __gt__, __ge__
def _add_eq(cls, attrs=None):
"""
Add equality methods to *cls* with *attrs*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__ = _make_eq(cls, attrs)
return cls
_already_repring = threading.local()
def _make_repr(attrs, ns):
"""
Make a repr method that includes relevant *attrs*, adding *ns* to the full
name.
"""
# Figure out which attributes to include, and which function to use to
# format them. The a.repr value can be either bool or a custom callable.
attr_names_with_reprs = tuple(
(a.name, repr if a.repr is True else a.repr)
for a in attrs
if a.repr is not False
)
def __repr__(self):
"""
Automatically created by attrs.
"""
try:
working_set = _already_repring.working_set
except AttributeError:
working_set = set()
_already_repring.working_set = working_set
if id(self) in working_set:
return "..."
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
# Since 'self' remains on the stack (i.e.: strongly referenced) for the
# duration of this call, it's safe to depend on id(...) stability, and
# not need to track the instance and therefore worry about properties
# like weakref- or hash-ability.
working_set.add(id(self))
try:
result = [class_name, "("]
first = True
for name, attr_repr in attr_names_with_reprs:
if first:
first = False
else:
result.append(", ")
result.extend(
(name, "=", attr_repr(getattr(self, name, NOTHING)))
)
return "".join(result) + ")"
finally:
working_set.remove(id(self))
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls
def _make_init(
cls, attrs, post_init, frozen, slots, cache_hash, base_attr_map, is_exc
):
attrs = [a for a in attrs if a.init or a.default is not NOTHING]
unique_filename = _generate_unique_filename(cls, "init")
script, globs, annotations = _attrs_to_init_script(
attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
__init__ = locs["__init__"]
__init__.__annotations__ = annotations
return __init__
def fields(cls):
"""
Return the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of `attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
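# Hedged usage sketch (not part of the original module), with a hypothetical
# attrs-decorated class C that defines fields x and y:
#
#   fields(C)           # (Attribute(name='x', ...), Attribute(name='y', ...))
#   fields(C).x         # the same Attribute, accessed by name
#   fields(C)[0].name   # 'x'
#   fields(42)          # raises TypeError: Passed object must be a class.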
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
`attr.Attribute`\\ s. This will be a `dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs))
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _is_slot_cls(cls):
return "__slots__" in cls.__dict__
def _is_slot_attr(a_name, base_attr_map):
"""
Check if the attribute name comes from a slot class.
"""
return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
def _attrs_to_init_script(
attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc
):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
any_slot_ancestors = any(
_is_slot_attr(a.name, base_attr_map) for a in attrs
)
if frozen is True:
if slots is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
# Note _setattr will be used again below if cache_hash is True
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
# Dict frozen classes assign directly to __dict__.
# But only if the attribute doesn't come from an ancestor slot
# class.
# Note _inst_dict will be used again below if cache_hash is True
lines.append("_inst_dict = self.__dict__")
if any_slot_ancestors:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup
# per assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
if _is_slot_attr(attr_name, base_attr_map):
res = "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
else:
res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % {
"attr_name": attr_name,
"value_var": value_var,
}
return res
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
if _is_slot_attr(attr_name, base_attr_map):
tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))"
else:
tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)"
return tmpl % {
"attr_name": attr_name,
"value_var": value_var,
"c": conv_name,
}
else:
# Not frozen.
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
kw_only_args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
annotations = {"return": None}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self),
)
)
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(
fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self),
)
)
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(
fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default".format(
attr_name=attr_name
),
)
)
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(
fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default".format(
attr_name=attr_name
),
)
)
elif a.default is not NOTHING and not has_factory:
arg = "{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name, attr_name=attr_name
)
if a.kw_only:
kw_only_args.append(arg)
else:
args.append(arg)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[
_init_converter_pat.format(a.name)
] = a.converter
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
arg = "{arg_name}=NOTHING".format(arg_name=arg_name)
if a.kw_only:
kw_only_args.append(arg)
else:
args.append(arg)
lines.append(
"if {arg_name} is not NOTHING:".format(arg_name=arg_name)
)
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(
" " + fmt_setter_with_converter(attr_name, arg_name)
)
lines.append("else:")
lines.append(
" "
+ fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self),
)
)
names_for_globals[
_init_converter_pat.format(a.name)
] = a.converter
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(
" "
+ fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self),
)
)
names_for_globals[init_factory_name] = a.default.factory
else:
if a.kw_only:
kw_only_args.append(arg_name)
else:
args.append(arg_name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[
_init_converter_pat.format(a.name)
] = a.converter
else:
lines.append(fmt_setter(attr_name, arg_name))
if a.init is True and a.converter is None and a.type is not None:
annotations[arg_name] = a.type
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(
" {}(self, {}, self.{})".format(val_name, attr_name, a.name)
)
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
    # because this is set only after __attrs_post_init__ is called, a crash
# will result if post-init tries to access the hash code. This seemed
# preferable to setting this beforehand, in which case alteration to
# field values during post-init combined with post-init accessing the
# hash code would result in silent bugs.
if cache_hash:
if frozen:
if slots:
# if frozen and slots, then _setattr defined above
init_hash_cache = "_setattr('%s', %s)"
else:
# if frozen and not slots, then _inst_dict defined above
init_hash_cache = "_inst_dict['%s'] = %s"
else:
init_hash_cache = "self.%s = %s"
lines.append(init_hash_cache % (_hash_cache_field, "None"))
# For exceptions we rely on BaseException.__init__ for proper
# initialization.
if is_exc:
vals = ",".join("self." + a.name for a in attrs if a.init)
lines.append("BaseException.__init__(self, %s)" % (vals,))
args = ", ".join(args)
if kw_only_args:
if PY2:
raise PythonTooOldError(
"Keyword-only arguments only work on Python 3 and later."
)
args += "{leading_comma}*, {kw_only_args}".format(
leading_comma=", " if args else "",
kw_only_args=", ".join(kw_only_args),
)
return (
"""\
def __init__(self, {args}):
{lines}
""".format(
args=args, lines="\n ".join(lines) if lines else "pass"
),
names_for_globals,
annotations,
)
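# Illustrative sketch (not part of the original module): for a plain
# (non-frozen, non-slotted) class with a hypothetical mandatory field x and a
# field y carrying a simple default, the returned script roughly reads
#
#   def __init__(self, x, y=attr_dict['y'].default):
#       self.x = x
#       self.y = y
#
# with NOTHING and attr_dict injected into the script's globals by _make_init.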
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of `attr.ib` (except for ``factory``
    which is only syntactic sugar for ``default=Factory(...)``).
For the version history of the fields, see `attr.ib`.
"""
__slots__ = (
"name",
"default",
"validator",
"repr",
"eq",
"order",
"hash",
"init",
"metadata",
"type",
"converter",
"kw_only",
)
def __init__(
self,
name,
default,
validator,
repr,
cmp, # XXX: unused, remove along with other cmp code.
hash,
init,
metadata=None,
type=None,
converter=None,
kw_only=False,
eq=None,
order=None,
):
eq, order = _determine_eq_order(cmp, eq, order)
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("eq", eq)
bound_setattr("order", order)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr(
"metadata",
(
metadata_proxy(metadata)
if metadata
else _empty_metadata_singleton
),
)
bound_setattr("type", type)
bound_setattr("kw_only", kw_only)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k in Attribute.__slots__
if k
not in (
"name",
"validator",
"default",
"type",
) # exclude methods and deprecated alias
}
return cls(
name=name,
validator=ca._validator,
default=ca._default,
type=type,
cmp=None,
**inst_dict
)
@property
def cmp(self):
"""
Simulate the presence of a cmp attribute and warn.
"""
warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.eq and self.order
# Don't use attr.assoc since fields(Attribute) doesn't work
def _assoc(self, **changes):
"""
Copy *self* and apply *changes*.
"""
new = copy.copy(self)
new._setattrs(changes.items())
return new
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(
getattr(self, name) if name != "metadata" else dict(self.metadata)
for name in self.__slots__
)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
self._setattrs(zip(self.__slots__, state))
def _setattrs(self, name_values_pairs):
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in name_values_pairs:
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(
name,
metadata_proxy(value)
if value
else _empty_metadata_singleton,
)
_a = [
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
eq=True,
order=False,
hash=(name != "metadata"),
init=True,
)
for name in Attribute.__slots__
]
Attribute = _add_hash(
_add_eq(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash],
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = (
"counter",
"_default",
"repr",
"eq",
"order",
"hash",
"init",
"metadata",
"_validator",
"converter",
"type",
"kw_only",
)
__attrs_attrs__ = tuple(
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
hash=True,
init=True,
kw_only=False,
eq=True,
order=False,
)
for name in (
"counter",
"_default",
"repr",
"eq",
"order",
"hash",
"init",
)
) + (
Attribute(
name="metadata",
default=None,
validator=None,
repr=True,
cmp=None,
hash=False,
init=True,
kw_only=False,
eq=True,
order=False,
),
)
cls_counter = 0
def __init__(
self,
default,
validator,
repr,
cmp, # XXX: unused, remove along with cmp
hash,
init,
converter,
metadata,
type,
kw_only,
eq,
order,
):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.eq = eq
self.order = order
self.hash = hash
self.init = init
self.converter = converter
self.metadata = metadata
self.type = type
self.kw_only = kw_only
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows one to set the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_eq(_add_repr(_CountingAttr))
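# --- Illustrative sketch (not part of the original source) -----------------
# A minimal, hedged example of how the ``validator``/``default`` decorator
# methods above surface through the class-building API.  It only uses names
# defined in this module (``attrs``, ``attrib``); the class ``_Example`` and
# its helpers are hypothetical and nothing here runs at import time.
def _example_default_and_validator_decorators():
    @attrs
    class _Example(object):
        x = attrib()

        @x.validator
        def _check_x(self, attribute, value):
            # Runs during __init__ with (self, Attribute, value).
            if value < 0:
                raise ValueError("x must be non-negative")

        @x.default
        def _default_x(self):
            # Wrapped in Factory(..., takes_self=True) by default() above.
            return 0

    assert _Example().x == 0
    return _Example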
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to `attr.ib`, the factory is used to
generate a new value.
    :param callable factory: A callable that takes either no arguments or
        exactly one mandatory positional argument, depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
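# --- Illustrative sketch (not part of the original source) -----------------
# A hedged example of Factory defaults, using only names defined above
# (``attrs``, ``attrib``, ``Factory``).  The class ``_Box`` is hypothetical.
def _example_factory_default():
    @attrs
    class _Box(object):
        # A fresh list per instance instead of one shared mutable default.
        items = attrib(default=Factory(list))
        # takes_self=True passes the partially initialized instance, so the
        # factory can look at attributes defined before this one.
        size = attrib(default=Factory(lambda self: len(self.items),
                                      takes_self=True))

    box = _Box()
    assert box.items == [] and box.size == 0
    return box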
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: `list` or `dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to `attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init},
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__"
)
except (AttributeError, ValueError):
pass
# We do it here for proper warnings with meaningful stacklevel.
cmp = attributes_arguments.pop("cmp", None)
attributes_arguments["eq"], attributes_arguments[
"order"
] = _determine_eq_order(
cmp, attributes_arguments.get("eq"), attributes_arguments.get("order")
)
return _attrs(these=cls_dict, **attributes_arguments)(type_)
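# --- Illustrative sketch (not part of the original source) -----------------
# A hedged example of make_class().  ``Point`` and ``Frozen`` are
# hypothetical names; the extra keyword argument is simply forwarded to
# ``attrs`` as documented above.
def _example_make_class():
    # List form: every name becomes a plain attrib().
    Point = make_class("Point", ["x", "y"])
    assert Point(1, 2) == Point(1, 2)

    # Dict form: per-attribute configuration; **attributes_arguments (here
    # frozen=True) is passed straight through to the class decorator.
    Frozen = make_class("Frozen", {"x": attrib(default=0)}, frozen=True)
    return Frozen()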
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators
if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
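# --- Illustrative sketch (not part of the original source) -----------------
# A hedged example of validator composition.  It imports the public package
# (``attr`` / ``attr.validators.instance_of``); ``_Bounded`` and ``_positive``
# are hypothetical.
def _example_and_validator():
    import attr
    from attr.validators import instance_of

    def _positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % attribute.name)

    @attr.s
    class _Bounded(object):
        # Passing a list is equivalent to and_(instance_of(int), _positive);
        # _CountingAttr.__init__ above performs exactly that wrapping.
        n = attr.ib(validator=[instance_of(int), _positive])

    assert _Bounded(3).n == 3
    return _Bounded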
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/attr/_make.py",
"copies": "4",
"size": "70807",
"license": "mit",
"hash": -3664384271123570700,
"line_mean": 31.6600553506,
"line_max": 79,
"alpha_frac": 0.551993447,
"autogenerated": false,
"ratio": 4.135922897196262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 2168
} |
from __future__ import absolute_import, division, print_function
import copy
import random
import pytest
import appr.semver
@pytest.fixture(scope='module')
def version_list():
l = ["1.4.0",
"1.6.0-alpha",
"1.6.0-alpha.1",
"1.6.0-alpha.beta",
"1.6.0-beta",
"1.6.0-beta.2",
"1.6.0-beta.11",
"1.6.0-rc.1",
"1.6.0",
"1.7.0-rc"]
return l
@pytest.fixture(scope='module')
def version_query():
return ['0.1.0',
'0.3.0',
'0.3.2-rc',
'0.5.0',
'0.5.2-rc',
'0.7.0',
'0.8.0-rc']
def test_ordering(version_list):
l2 = copy.copy(version_list)
random.seed(1)
random.shuffle(l2)
assert str(appr.semver.versions(l2, stable=False)) != str(appr.semver.versions(version_list, stable=False))
assert str(sorted(appr.semver.versions(l2, stable=False))) == str(appr.semver.versions(version_list, stable=False))
def test_stable_only(version_list):
assert appr.semver.versions(version_list, stable=True) == appr.semver.versions(["1.4.0", "1.6.0"])
def test_last_stable_version(version_list):
assert str(appr.semver.last_version(version_list, True)) == "1.6.0"
def test_last_unstable_version(version_list):
assert str(appr.semver.last_version(version_list, False)) == "1.7.0-rc"
def test_select_version(version_query):
expected_results = [("==0.5.0", "0.5.0"),
(">=0.1.0", "0.7.0"),
("<0.4.0", "0.3.0"),
(">=0.3.0,<0.6.0", "0.5.0"),
([">=0.3.0", "<0.6.0"], "0.5.0"),
(">=0.1.0-", "0.8.0-rc"),
("<0.4.0-", "0.3.2-rc"),
(">=0.3.0-,<0.6.0", "0.5.2-rc"),
(">=0.3.0,<0.6.0-", "0.5.2-rc"),
("==10.0.0", 'None')]
for query, expected in expected_results:
assert str(appr.semver.select_version(version_query, query)) == expected
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "tests/test_semver.py",
"copies": "2",
"size": "2038",
"license": "apache-2.0",
"hash": -2190023654511110000,
"line_mean": 28.5362318841,
"line_max": 119,
"alpha_frac": 0.5009813543,
"autogenerated": false,
"ratio": 2.742934051144011,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42439154054440115,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import Attribute, NOTHING, fields
def asdict(inst, recurse=True, filter=None):
"""
    Return the ``attrs`` attribute values of *inst* as a dict.  Optionally
    recurse into other ``attrs``-decorated classes.
    :param inst: Instance of an ``attrs``-decorated class.
:param recurse: Recurse into classes that are also ``attrs``-decorated.
:type recurse: bool
    :param filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the :class:`attr.Attribute` as the first argument and the
value as the second argument.
    :type filter: callable
:rtype: :class:`dict`
"""
attrs = fields(inst.__class__)
rv = {}
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(v, recurse=True, filter=filter)
elif isinstance(v, (tuple, list, set)):
rv[a.name] = [
asdict(i, recurse=True, filter=filter)
if has(i.__class__) else i
for i in v
]
elif isinstance(v, dict):
rv[a.name] = dict((asdict(kk) if has(kk.__class__) else kk,
asdict(vv) if has(vv.__class__) else vv)
for kk, vv in iteritems(v))
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
def has(cl):
"""
Check whether *cl* is a class with ``attrs`` attributes.
:param cl: Class to introspect.
:type cl: type
:raise TypeError: If *cl* is not a class.
:rtype: :class:`bool`
"""
try:
fields(cl)
except ValueError:
return False
else:
return True
def assoc(inst, **changes):
"""
Copy *inst* and apply *changes*.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
"""
new = copy.copy(inst)
for k, v in iteritems(changes):
a = getattr(new.__class__, k, NOTHING)
if a is NOTHING or not isinstance(a, Attribute):
raise ValueError(
"{k} is not an attrs attribute on {cl}."
.format(k=k, cl=new.__class__)
)
setattr(new, k, v)
return new
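# --- Illustrative sketch (not part of the original file) -------------------
# A hedged example of this early asdict()/has() API.  It builds a class with
# the public ``attr`` package; ``_User`` and its attribute names are
# hypothetical.
def _example_asdict_filter():
    import attr

    @attr.s
    class _User(object):
        name = attr.ib()
        _token = attr.ib(default="secret")

    # The filter callable described above receives (Attribute, value) and
    # can be used to drop "private" attributes from the output.
    public = asdict(_User("alice"),
                    filter=lambda a, v: not a.name.startswith("_"))
    assert public == {"name": "alice"}
    assert has(_User)
    return public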
| {
"repo_name": "cyli/attrs",
"path": "attr/_funcs.py",
"copies": "1",
"size": "2661",
"license": "mit",
"hash": 4144849698193384400,
"line_mean": 28.2417582418,
"line_max": 79,
"alpha_frac": 0.5475385194,
"autogenerated": false,
"ratio": 3.9716417910447763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019180310444776,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import NOTHING, _obj_setattr, fields
from .exceptions import AttrsAttributeNotFoundError
def asdict(inst, recurse=True, filter=None, dict_factory=dict,
retain_collection_types=False):
"""
Return the ``attrs`` attribute values of *inst* as a dict.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the :class:`attr.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
:param bool retain_collection_types: Do not convert to ``list`` when
encountering an attribute whose type is ``tuple`` or ``set``. Only
meaningful if ``recurse`` is ``True``.
:rtype: return type of *dict_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.0.0 *dict_factory*
.. versionadded:: 16.1.0 *retain_collection_types*
"""
attrs = fields(inst.__class__)
rv = dict_factory()
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(v, recurse=True, filter=filter,
dict_factory=dict_factory)
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain_collection_types is True else list
rv[a.name] = cf([
asdict(i, recurse=True, filter=filter,
dict_factory=dict_factory)
if has(i.__class__) else i
for i in v
])
elif isinstance(v, dict):
df = dict_factory
rv[a.name] = df((
asdict(kk, dict_factory=df) if has(kk.__class__) else kk,
asdict(vv, dict_factory=df) if has(vv.__class__) else vv)
for kk, vv in iteritems(v))
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
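# --- Illustrative sketch (not part of the original file) -------------------
# A hedged example of asdict() with a custom dict_factory and recursion into
# nested attrs classes.  ``_Point``/``_Line`` are hypothetical classes built
# with the public ``attr`` package.
def _example_asdict_ordered():
    import attr
    from collections import OrderedDict

    @attr.s
    class _Point(object):
        x = attr.ib()
        y = attr.ib()

    @attr.s
    class _Line(object):
        start = attr.ib()
        end = attr.ib()

    line = _Line(_Point(0, 0), _Point(1, 1))
    d = asdict(line, dict_factory=OrderedDict)   # nested dicts use it too
    assert list(d) == ["start", "end"]
    assert d["end"]["y"] == 1
    return d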
def astuple(inst, recurse=True, filter=None, tuple_factory=tuple,
retain_collection_types=False):
"""
Return the ``attrs`` attribute values of *inst* as a tuple.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the :class:`attr.Attribute` as the first argument and the
value as the second argument.
:param callable tuple_factory: A callable to produce tuples from. For
example, to produce lists instead of tuples.
:param bool retain_collection_types: Do not convert to ``list``
        or ``dict`` when encountering an attribute whose type is
``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
``True``.
:rtype: return type of *tuple_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.2.0
"""
attrs = fields(inst.__class__)
rv = []
retain = retain_collection_types # Very long. :/
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv.append(astuple(v, recurse=True, filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain))
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain is True else list
rv.append(cf([
astuple(j, recurse=True, filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain)
if has(j.__class__) else j
for j in v
]))
elif isinstance(v, dict):
df = v.__class__ if retain is True else dict
rv.append(df(
(
astuple(
kk,
tuple_factory=tuple_factory,
retain_collection_types=retain
) if has(kk.__class__) else kk,
astuple(
vv,
tuple_factory=tuple_factory,
retain_collection_types=retain
) if has(vv.__class__) else vv
)
for kk, vv in iteritems(v)))
else:
rv.append(v)
else:
rv.append(v)
return rv if tuple_factory is list else tuple_factory(rv)
def has(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: :class:`bool`
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def assoc(inst, **changes):
"""
Copy *inst* and apply *changes*.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
be found on *cls*.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. deprecated:: 17.1.0
Use :func:`evolve` instead.
"""
import warnings
warnings.warn("assoc is deprecated and will be removed after 2018/01.",
DeprecationWarning, stacklevel=2)
new = copy.copy(inst)
attrs = fields(inst.__class__)
for k, v in iteritems(changes):
a = getattr(attrs, k, NOTHING)
if a is NOTHING:
raise AttrsAttributeNotFoundError(
"{k} is not an attrs attribute on {cl}."
.format(k=k, cl=new.__class__)
)
_obj_setattr(new, k, v)
return new
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
if init_name not in changes:
changes[init_name] = getattr(inst, attr_name)
return cls(**changes)
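# --- Illustrative sketch (not part of the original file) -------------------
# A hedged example contrasting evolve() with the deprecated assoc().  The
# frozen ``_Config`` class is hypothetical and built with the public ``attr``
# package.
def _example_evolve():
    import attr

    @attr.s(frozen=True)
    class _Config(object):
        host = attr.ib(default="localhost")
        port = attr.ib(default=8080)

    base = _Config()
    # evolve() builds a brand new instance through __init__, so validators
    # and converters run again; the original stays untouched.
    prod = evolve(base, host="example.org")
    assert (prod.host, prod.port) == ("example.org", 8080)
    assert base.host == "localhost"
    return prod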
| {
"repo_name": "emilio/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/attrs/src/attr/_funcs.py",
"copies": "54",
"size": "7894",
"license": "mpl-2.0",
"hash": -6401157151985768000,
"line_mean": 36.2358490566,
"line_max": 79,
"alpha_frac": 0.5557385356,
"autogenerated": false,
"ratio": 4.271645021645021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
from matplotlib.patches import Ellipse, Polygon, Rectangle, Path as MplPath, PathPatch
from matplotlib.transforms import IdentityTransform, blended_transform_factory
from glue.core.exceptions import UndefinedROI
from glue.utils import points_inside_poly
np.seterr(all='ignore')
__all__ = ['Roi', 'RectangularROI', 'CircularROI', 'PolygonalROI',
'AbstractMplRoi', 'MplRectangularROI', 'MplCircularROI',
'MplPolygonalROI', 'MplXRangeROI', 'MplYRangeROI',
'XRangeROI', 'RangeROI', 'YRangeROI', 'VertexROIBase',
'CategoricalROI']
PATCH_COLOR = '#FFFF00'
SCRUBBING_KEY = 'control'
def aspect_ratio(axes):
""" Returns the pixel height / width of a box that spans 1
data unit in x and y
"""
width = axes.get_position().width * axes.figure.get_figwidth()
height = axes.get_position().height * axes.figure.get_figheight()
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
return height / width / (ymax - ymin) * (xmax - xmin)
def data_to_norm(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
pixel = axes.transData.transform(xy)
norm = axes.transAxes.inverted().transform(pixel)
return norm
def data_to_pixel(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
return axes.transData.transform(xy)
def pixel_to_data(axes, x, y):
xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
return axes.transData.inverted().transform(xy)
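# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example that round-trips a data point through the helpers above.
# It forces the non-interactive "agg" backend so it can run headless; the
# axis limits are arbitrary.
def _example_coordinate_helpers():
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 5)

    pix = data_to_pixel(ax, [2.0], [3.0])          # (1, 2) array of pixels
    back = pixel_to_data(ax, pix[:, 0], pix[:, 1])
    assert abs(back[0, 0] - 2.0) < 1e-6
    assert abs(back[0, 1] - 3.0) < 1e-6

    ratio = aspect_ratio(ax)                       # pixel height/width per data unit
    plt.close(fig)
    return ratio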
class Roi(object): # pragma: no cover
"""
A geometrical 2D region of interest.
    Glue uses ROIs to represent user-drawn regions on plots. There
are many specific subtypes of Roi, but they all have a ``contains``
method to test whether a collection of 2D points lies inside the region.
"""
def contains(self, x, y):
"""Return true/false for each x/y pair.
:param x: Array of X locations
:param y: Array of Y locations
:returns: A Boolean array, where each element is True
if the corresponding (x,y) tuple is inside the Roi.
:raises: UndefinedROI exception if not defined
"""
raise NotImplementedError()
def center(self):
"""Return the (x,y) coordinates of the ROI center"""
raise NotImplementedError()
def move_to(self, x, y):
"""Translate the ROI to a center of (x, y)"""
raise NotImplementedError()
def defined(self):
""" Returns whether or not the subset is properly defined """
raise NotImplementedError()
def to_polygon(self):
""" Returns a tuple of x and y points, approximating the ROI
as a polygon."""
raise NotImplementedError
def copy(self):
"""
Return a clone of the ROI
"""
return copy.copy(self)
class PointROI(Roi):
def __init__(self, x=None, y=None):
self.x = x
self.y = y
def contains(self, x, y):
return False
def move_to(self, x, y):
self.x = x
self.y = y
def defined(self):
try:
return np.isfinite([self.x, self.y]).all()
except TypeError:
return False
def center(self):
return self.x, self.y
def reset(self):
self.x = self.y = None
class RectangularROI(Roi):
"""
A 2D rectangular region of interest.
"""
def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None):
super(RectangularROI, self).__init__()
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __str__(self):
if self.defined():
return "x=[%0.3f, %0.3f], y=[%0.3f, %0.3f]" % (self.xmin,
self.xmax,
self.ymin,
self.ymax)
else:
return "Undefined Rectangular ROI"
def center(self):
return self.xmin + self.width() / 2, self.ymin + self.height() / 2
def move_to(self, x, y):
cx, cy = self.center()
dx = x - cx
dy = y - cy
self.xmin += dx
self.xmax += dx
self.ymin += dy
self.ymax += dy
def transpose(self, copy=True):
if copy:
new = self.copy()
new.xmin, new.xmax = self.ymin, self.ymax
new.ymin, new.ymax = self.xmin, self.xmax
return new
self.xmin, self.ymin = self.ymin, self.xmin
self.xmax, self.ymax = self.ymax, self.xmax
def corner(self):
return (self.xmin, self.ymin)
def width(self):
return self.xmax - self.xmin
def height(self):
return self.ymax - self.ymin
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A scalar or numpy array of x points
:param y: A scalar or numpy array of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
return (x > self.xmin) & (x < self.xmax) & \
(y > self.ymin) & (y < self.ymax)
def update_limits(self, xmin, ymin, xmax, ymax):
"""
Update the limits of the rectangle
"""
self.xmin = min(xmin, xmax)
self.xmax = max(xmin, xmax)
self.ymin = min(ymin, ymax)
self.ymax = max(ymin, ymax)
def reset(self):
"""
Reset the rectangular region.
"""
self.xmin = None
self.xmax = None
self.ymin = None
self.ymax = None
def defined(self):
return self.xmin is not None
def to_polygon(self):
if self.defined():
return [self.xmin, self.xmax, self.xmax, self.xmin, self.xmin], \
[self.ymin, self.ymin, self.ymax, self.ymax, self.ymin]
else:
return [], []
def __gluestate__(self, context):
return dict(xmin=self.xmin, xmax=self.xmax, ymin=self.ymin, ymax=self.ymax)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(xmin=rec['xmin'], xmax=rec['xmax'],
ymin=rec['ymin'], ymax=rec['ymax'])
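# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of the basic RectangularROI workflow defined above.
def _example_rectangular_roi():
    roi = RectangularROI()
    assert not roi.defined()

    roi.update_limits(1, 2, 4, 6)            # xmin, ymin, xmax, ymax
    assert roi.contains(2, 3)
    assert not roi.contains(0, 0)
    assert roi.center() == (2.5, 4.0)

    roi.move_to(0, 0)                        # translate the centre to (0, 0)
    assert roi.corner() == (-1.5, -2.0)
    return roi.to_polygon()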
class RangeROI(Roi):
def __init__(self, orientation, min=None, max=None):
""":param orientation: 'x' or 'y'. Sets which axis to range"""
super(RangeROI, self).__init__()
self.min = min
self.max = max
self.ori = orientation
@property
def ori(self):
return self._ori
@ori.setter
def ori(self, value):
if value in set('xy'):
self._ori = value
else:
raise ValueError("Orientation must be one of 'x', 'y'")
def __str__(self):
if self.defined():
return "%0.3f < %s < %0.3f" % (self.min, self.ori,
self.max)
else:
return "Undefined %s" % type(self).__name__
def range(self):
return self.min, self.max
def center(self):
return (self.min + self.max) / 2
def set_range(self, lo, hi):
self.min, self.max = lo, hi
def move_to(self, center):
delta = center - self.center()
self.min += delta
self.max += delta
def contains(self, x, y):
if not self.defined():
raise UndefinedROI()
coord = x if self.ori == 'x' else y
return (coord > self.min) & (coord < self.max)
def reset(self):
self.min = None
self.max = None
def defined(self):
return self.min is not None and self.max is not None
def to_polygon(self):
if self.defined():
on = [self.min, self.max, self.max, self.min, self.min]
off = [-1e100, -1e100, 1e100, 1e100, -1e100]
x, y = (on, off) if (self.ori == 'x') else (off, on)
return x, y
else:
return [], []
def __gluestate__(self, context):
return dict(ori=self.ori, min=self.min, max=self.max)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(rec['ori'], min=rec['min'], max=rec['max'])
class XRangeROI(RangeROI):
def __init__(self, min=None, max=None):
super(XRangeROI, self).__init__('x', min=min, max=max)
class YRangeROI(RangeROI):
def __init__(self, min=None, max=None):
super(YRangeROI, self).__init__('y', min=min, max=max)
class CircularROI(Roi):
"""
A 2D circular region of interest.
"""
def __init__(self, xc=None, yc=None, radius=None):
super(CircularROI, self).__init__()
self.xc = xc
self.yc = yc
self.radius = radius
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A list of x points
:param y: A list of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
return (x - self.xc) ** 2 + (y - self.yc) ** 2 < self.radius ** 2
def set_center(self, x, y):
"""
Set the center of the circular region
"""
self.xc = x
self.yc = y
def set_radius(self, radius):
"""
Set the radius of the circular region
"""
self.radius = radius
def get_center(self):
return self.xc, self.yc
def get_radius(self):
return self.radius
def reset(self):
"""
Reset the rectangular region.
"""
self.xc = None
self.yc = None
self.radius = 0.
def defined(self):
""" Returns True if the ROI is defined """
return self.xc is not None and \
self.yc is not None and self.radius is not None
def to_polygon(self):
""" Returns x, y, where each is a list of points """
if not self.defined():
return [], []
theta = np.linspace(0, 2 * np.pi, num=20)
x = self.xc + self.radius * np.cos(theta)
y = self.yc + self.radius * np.sin(theta)
return x, y
def __gluestate__(self, context):
return dict(xc=self.xc, yc=self.yc, radius=self.radius)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(xc=rec['xc'], yc=rec['yc'], radius=rec['radius'])
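# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of CircularROI membership tests and the 20-point polygon
# approximation used for serialisation.
def _example_circular_roi():
    roi = CircularROI(xc=0., yc=0., radius=1.)
    inside = roi.contains([0.0, 0.5, 2.0], [0.0, 0.5, 0.0])
    assert inside.tolist() == [True, True, False]

    x, y = roi.to_polygon()
    assert len(x) == 20
    assert np.allclose(x ** 2 + y ** 2, 1.0)
    return x, y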
class VertexROIBase(Roi):
def __init__(self, vx=None, vy=None):
"""
:param vx: initial x vertices
:type vx: list
:param vy: initial y vertices
:type vy: list
"""
super(VertexROIBase, self).__init__()
self.vx = vx
self.vy = vy
if self.vx is None:
self.vx = []
if self.vy is None:
self.vy = []
def add_point(self, x, y):
"""
Add another vertex to the ROI
:param x: The x coordinate
:param y: The y coordinate
"""
self.vx.append(x)
self.vy.append(y)
def reset(self):
"""
Reset the vertex list.
"""
self.vx = []
self.vy = []
def replace_last_point(self, x, y):
if len(self.vx) > 0:
self.vx[-1] = x
self.vy[-1] = y
def remove_point(self, x, y, thresh=None):
"""Remove the vertex closest to a reference (xy) point
:param x: The x coordinate of the reference point
:param y: The y coordinate of the reference point
        :param thresh: An optional threshold. If present, the vertex
closest to (x,y) will only be removed if the distance
is less than thresh
"""
if len(self.vx) == 0:
return
# find distance between vertices and input
dist = [(x - a) ** 2 + (y - b) ** 2 for a, b
in zip(self.vx, self.vy)]
inds = range(len(dist))
near = min(inds, key=lambda x: dist[x])
if thresh is not None and dist[near] > (thresh ** 2):
return
self.vx = [self.vx[i] for i in inds if i != near]
self.vy = [self.vy[i] for i in inds if i != near]
def defined(self):
return len(self.vx) > 0
def to_polygon(self):
return self.vx, self.vy
def __gluestate__(self, context):
return dict(vx=np.asarray(self.vx).tolist(),
vy=np.asarray(self.vy).tolist())
@classmethod
def __setgluestate__(cls, rec, context):
return cls(vx=rec['vx'], vy=rec['vy'])
class PolygonalROI(VertexROIBase):
"""
A class to define 2D polygonal regions-of-interest
"""
def __str__(self):
result = 'Polygonal ROI ('
result += ','.join(['(%s, %s)' % (x, y)
for x, y in zip(self.vx, self.vy)])
result += ')'
return result
def contains(self, x, y):
"""
Test whether a set of (x,y) points falls within
the region of interest
:param x: A list of x points
:param y: A list of y points
*Returns*
A list of True/False values, for whether each (x,y)
point falls within the ROI
"""
if not self.defined():
raise UndefinedROI
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
result = points_inside_poly(x.flat, y.flat, self.vx, self.vy)
good = np.isfinite(x.flat) & np.isfinite(y.flat)
result[~good] = False
result.shape = x.shape
return result
def move_to(self, xdelta, ydelta):
self.vx = list(map(lambda x: x + xdelta, self.vx))
self.vy = list(map(lambda y: y + ydelta, self.vy))
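# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of point-in-polygon tests against a unit square; the
# non-finite coordinate demonstrates the masking performed in contains().
def _example_polygonal_roi():
    roi = PolygonalROI(vx=[0, 1, 1, 0], vy=[0, 0, 1, 1])
    result = roi.contains(np.array([0.5, 2.0, np.nan]),
                          np.array([0.5, 0.5, 0.5]))
    assert result.tolist() == [True, False, False]

    roi.move_to(10, 0)                       # shift every vertex by +10 in x
    assert roi.contains(np.array([10.5]), np.array([0.5]))[0]
    return roi.to_polygon()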
class Path(VertexROIBase):
def __str__(self):
result = 'Path ('
result += ','.join(['(%s, %s)' % (x, y)
for x, y in zip(self.vx, self.vy)])
result += ')'
return result
class AbstractMplRoi(object): # pragma: no cover
""" Base class for objects which use
Matplotlib user events to edit/display ROIs
"""
def __init__(self, axes):
"""
:param axes: The Matplotlib Axes object to draw to
"""
self._axes = axes
self._roi = self._roi_factory()
self._previous_roi = None
self._mid_selection = False
self._scrubbing = False
def _draw(self):
self._axes.figure.canvas.draw()
def _roi_factory(self):
raise NotImplementedError()
def roi(self):
return self._roi.copy()
def reset(self, include_roi=True):
self._mid_selection = False
self._scrubbing = False
if include_roi:
self._roi.reset()
self._sync_patch()
def active(self):
return self._mid_selection
def start_selection(self, event):
raise NotImplementedError()
def update_selection(self, event):
raise NotImplementedError()
def finalize_selection(self, event):
raise NotImplementedError()
def abort_selection(self, event):
if self._mid_selection:
self._roi_restore()
self.reset(include_roi=False)
def _sync_patch(self):
raise NotImplementedError()
def _roi_store(self):
self._previous_roi = self._roi.copy()
def _roi_restore(self):
self._roi = self._previous_roi
class MplPickROI(AbstractMplRoi):
def _draw(self):
pass
def _roi_factory(self):
return PointROI()
def start_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def update_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def finalize_selection(self, event):
self._roi.x = event.xdata
self._roi.y = event.ydata
def _sync_patch(self):
pass
class MplRectangularROI(AbstractMplRoi):
"""
    Displays and edits a RectangularROI on a matplotlib plot
*Attributes*:
plot_opts:
Dictionary instance
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self._yi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._patch = Rectangle((0., 0.), 1., 1.)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return RectangularROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
self._xi = event.xdata
self._yi = event.ydata
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._cx, self._cy = self._roi.center()
else:
self.reset()
self._roi.update_limits(event.xdata, event.ydata,
event.xdata, event.ydata)
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(self._cx + event.xdata - self._xi,
self._cy + event.ydata - self._yi)
else:
self._roi.update_limits(min(event.xdata, self._xi),
min(event.ydata, self._yi),
max(event.xdata, self._xi),
max(event.ydata, self._yi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
corner = self._roi.corner()
width = self._roi.width()
height = self._roi.height()
self._patch.set_xy(corner)
self._patch.set_width(width)
self._patch.set_height(height)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
def __str__(self):
return "MPL Rectangle: %s" % self._patch
class MplXRangeROI(AbstractMplRoi):
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
trans = blended_transform_factory(self._axes.transData,
self._axes.transAxes)
self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return XRangeROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._dx = event.xdata - self._roi.center()
else:
self.reset()
self._roi.set_range(event.xdata, event.xdata)
self._xi = event.xdata
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.xdata + self._dx)
else:
self._roi.set_range(min(event.xdata, self._xi),
max(event.xdata, self._xi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
rng = self._roi.range()
self._patch.set_xy((rng[0], 0))
self._patch.set_width(rng[1] - rng[0])
self._patch.set_height(1)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
class MplYRangeROI(AbstractMplRoi):
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self._xi = None
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
trans = blended_transform_factory(self._axes.transAxes,
self._axes.transData)
self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
self._patch.set_zorder(100)
self._setup_patch()
def _setup_patch(self):
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return YRangeROI()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._dy = event.ydata - self._roi.center()
else:
self.reset()
self._roi.set_range(event.ydata, event.ydata)
self._xi = event.ydata
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.ydata + self._dy)
else:
self._roi.set_range(min(event.ydata, self._xi),
max(event.ydata, self._xi))
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._draw()
def _sync_patch(self):
if self._roi.defined():
rng = self._roi.range()
self._patch.set_xy((0, rng[0]))
self._patch.set_height(rng[1] - rng[0])
self._patch.set_width(1)
self._patch.set(**self.plot_opts)
self._patch.set_visible(True)
else:
self._patch.set_visible(False)
self._draw()
class MplCircularROI(AbstractMplRoi):
"""
Class to display / edit circular ROIs using matplotlib
Since circles on the screen may not be circles in the data
(due, e.g., to logarithmic scalings on the axes), the
    ultimate ROI that is created is a polygonal ROI.
:param plot_opts:
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._xi = None
self._yi = None
self._setup_patch()
def _setup_patch(self):
self._patch = Ellipse((0., 0.), transform=IdentityTransform(),
width=0., height=0.,)
self._patch.set_zorder(100)
self._patch.set(**self.plot_opts)
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return CircularROI()
def _sync_patch(self):
# Update geometry
if not self._roi.defined():
self._patch.set_visible(False)
else:
xy = self._roi.get_center()
r = self._roi.get_radius()
self._patch.center = xy
self._patch.width = 2. * r
self._patch.height = 2. * r
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
xi = xy[0, 0]
yi = xy[0, 1]
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(xi, yi):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
(xc, yc) = self._roi.get_center()
self._dx = xc - xi
self._dy = yc - yi
else:
self.reset()
self._roi.set_center(xi, yi)
self._roi.set_radius(0.)
self._xi = xi
self._yi = yi
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
xi = xy[0, 0]
yi = xy[0, 1]
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.set_center(xi + self._dx, yi + self._dy)
else:
dx = xy[0, 0] - self._xi
dy = xy[0, 1] - self._yi
self._roi.set_radius(np.hypot(dx, dy))
self._sync_patch()
def roi(self):
if not self._roi.defined():
return PolygonalROI()
theta = np.linspace(0, 2 * np.pi, num=200)
xy_center = self._roi.get_center()
rad = self._roi.get_radius()
x = xy_center[0] + rad * np.cos(theta)
y = xy_center[1] + rad * np.sin(theta)
xy_data = pixel_to_data(self._axes, x, y)
vx = xy_data[:, 0].ravel().tolist()
vy = xy_data[:, 1].ravel().tolist()
result = PolygonalROI(vx, vy)
return result
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
class MplPolygonalROI(AbstractMplRoi):
"""
Defines and displays polygonal ROIs on matplotlib plots
Attributes:
plot_opts: Dictionary instance
A dictionary of plot keywords that are passed to
the patch representing the ROI. These control
the visual properties of the ROI
"""
def __init__(self, axes):
"""
:param axes: A matplotlib Axes object to attach the graphical ROI to
"""
AbstractMplRoi.__init__(self, axes)
self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
'alpha': 0.3}
self._setup_patch()
def _setup_patch(self):
self._patch = Polygon(np.array(list(zip([0, 1], [0, 1]))))
self._patch.set_zorder(100)
self._patch.set(**self.plot_opts)
self._axes.add_patch(self._patch)
self._patch.set_visible(False)
self._sync_patch()
def _roi_factory(self):
return PolygonalROI()
def _sync_patch(self):
# Update geometry
if not self._roi.defined():
self._patch.set_visible(False)
else:
x, y = self._roi.to_polygon()
self._patch.set_xy(list(zip(x + [x[0]],
y + [y[0]])))
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def start_selection(self, event):
if event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
elif not self._roi.contains(event.xdata, event.ydata):
return False
self._roi_store()
if event.key == SCRUBBING_KEY:
self._scrubbing = True
self._cx = event.xdata
self._cy = event.ydata
else:
self.reset()
self._roi.add_point(event.xdata, event.ydata)
self._mid_selection = True
self._sync_patch()
def update_selection(self, event):
if not self._mid_selection or event.inaxes != self._axes:
return False
if event.key == SCRUBBING_KEY:
if not self._roi.defined():
return False
if self._scrubbing:
self._roi.move_to(event.xdata - self._cx,
event.ydata - self._cy)
self._cx = event.xdata
self._cy = event.ydata
else:
self._roi.add_point(event.xdata, event.ydata)
self._sync_patch()
def finalize_selection(self, event):
self._scrubbing = False
self._mid_selection = False
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
class MplPathROI(MplPolygonalROI):
def roi_factory(self):
return Path()
def _setup_patch(self):
self._patch = None
def _sync_patch(self):
if self._patch is not None:
self._patch.remove()
self._patch = None
# Update geometry
if not self._roi.defined():
return
else:
x, y = self._roi.to_polygon()
p = MplPath(np.column_stack((x, y)))
self._patch = PathPatch(p)
self._patch.set_visible(True)
# Update appearance
self._patch.set(**self.plot_opts)
# Refresh
self._axes.figure.canvas.draw()
def finalize_selection(self, event):
self._mid_selection = False
if self._patch is not None:
self._patch.set_visible(False)
self._axes.figure.canvas.draw()
class CategoricalROI(Roi):
"""
A ROI abstraction to represent selections of categorical data.
"""
def __init__(self, categories=None):
if categories is None:
self.categories = None
else:
self.update_categories(categories)
def to_polygon(self):
""" Just not possible.
"""
raise NotImplementedError
def _categorical_helper(self, indata):
"""
        A helper function to do the rigamarole of getting categorical data.
:param indata: Any type of input data
:return: The best guess at the categorical data associated with indata
"""
try:
if indata.categorical:
return indata._categorical_data
else:
return indata[:]
except AttributeError:
return np.asarray(indata)
def contains(self, x, y):
"""
        Test whether a set of categorical elements falls within
        the region of interest
        :param x: Any array-like object of categories
            (includes CategoricalComponents)
:param y: Unused but required for compatibility
*Returns*
A list of True/False values, for whether each x value falls
within the ROI
"""
if self.categories is None or len(self.categories) == 0:
return np.zeros(x.shape, dtype=bool)
else:
check = self._categorical_helper(x)
index = np.minimum(np.searchsorted(self.categories, check),
len(self.categories) - 1)
return self.categories[index] == check
def update_categories(self, categories):
self.categories = np.unique(self._categorical_helper(categories))
def defined(self):
""" Returns True if the ROI is defined """
return self.categories is not None
def reset(self):
self.categories = None
@staticmethod
def from_range(cat_comp, lo, hi):
"""
Utility function to help construct the Roi from a range.
:param cat_comp: Anything understood by ._categorical_helper ... array, list or component
:param lo: lower bound of the range
:param hi: upper bound of the range
:return: CategoricalROI object
"""
# Convert lo and hi to integers. Note that if lo or hi are negative,
        # which can happen if the user zoomed out, we need to reset them to zero
# otherwise they will have strange effects when slicing the categories.
# Note that we used ceil for lo, because if lo is 0.9 then we should
# only select 1 and above.
lo = np.intp(np.ceil(lo) if lo > 0 else 0)
hi = np.intp(np.ceil(hi) if hi > 0 else 0)
roi = CategoricalROI()
cat_data = cat_comp.categories
roi.update_categories(cat_data[lo:hi])
return roi
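# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of CategoricalROI; plain lists work because
# _categorical_helper falls back to np.asarray.
def _example_categorical_roi():
    roi = CategoricalROI(categories=['a', 'b', 'c'])
    hits = roi.contains(np.array(['a', 'z', 'c']), None)
    assert hits.tolist() == [True, False, True]

    roi.reset()
    assert not roi.defined()
    return roi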
| {
"repo_name": "saimn/glue",
"path": "glue/core/roi.py",
"copies": "1",
"size": "35257",
"license": "bsd-3-clause",
"hash": 8090697605407279000,
"line_mean": 26.8711462451,
"line_max": 97,
"alpha_frac": 0.5380208186,
"autogenerated": false,
"ratio": 3.820654529692241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9858542529729353,
"avg_score": 0.000026563712577848364,
"num_lines": 1265
} |
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
from ..core.pycompat import OrderedDict
from ..core.variable import Variable
from .common import AbstractWritableDataStore
class InMemoryDataStore(AbstractWritableDataStore):
"""
Stores dimensions, variables and attributes in ordered dictionaries, making
this store fast compared to stores which save to disk.
This store exists purely for internal testing purposes.
"""
def __init__(self, variables=None, attributes=None, writer=None):
self._variables = OrderedDict() if variables is None else variables
self._attributes = OrderedDict() if attributes is None else attributes
super(InMemoryDataStore, self).__init__(writer)
def get_attrs(self):
return self._attributes
def get_variables(self):
return self._variables
def get_dimensions(self):
dims = OrderedDict()
for v in self._variables.values():
for d, s in v.dims.items():
dims[d] = s
return dims
def prepare_variable(self, k, v, *args, **kwargs):
new_var = Variable(v.dims, np.empty_like(v), v.attrs)
self._variables[k] = new_var
return new_var, v.data
def set_attribute(self, k, v):
# copy to imitate writing to disk.
self._attributes[k] = copy.deepcopy(v)
def set_dimension(self, d, l, unlimited_dims=None):
# in this model, dimensions are accounted for in the variables
pass
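# --- Illustrative sketch (not part of the original file) -------------------
# A hedged example of driving the store directly: prepare_variable allocates
# an empty copy and hands back the source data, mirroring how disk-backed
# stores defer the actual write.  The variable/attribute names are
# hypothetical.
def _example_in_memory_store():
    store = InMemoryDataStore()
    store.set_attribute('title', 'demo')

    source = Variable(('x',), np.arange(3), {'units': 'm'})
    target, data = store.prepare_variable('v', source)
    target.data[...] = data                  # "write" the values

    assert store.get_attrs()['title'] == 'demo'
    assert store.get_variables()['v'].dims == ('x',)
    return store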
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/memory.py",
"copies": "1",
"size": "1537",
"license": "apache-2.0",
"hash": 7802358578531043000,
"line_mean": 30.3673469388,
"line_max": 79,
"alpha_frac": 0.6629798308,
"autogenerated": false,
"ratio": 4.154054054054054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5317033884854053,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import csv
import datetime
import functools
import time
import requests
from panoptes_client.panoptes import (
PanoptesAPIException,
Talk,
)
TALK_EXPORT_TYPES = (
'talk_comments',
'talk_tags',
)
talk = Talk()
class Exportable(object):
"""
Abstract class containing methods for generating and downloading data
exports.
"""
def get_export(
self,
export_type,
generate=False,
wait=False,
wait_timeout=None,
):
"""
Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
        - **wait_timeout** is the number of seconds to wait if ``wait`` or
          ``generate`` is ``True``. Has no effect otherwise.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row)
"""
if generate:
self.generate_export(export_type)
if generate or wait:
export = self.wait_export(export_type, wait_timeout)
else:
export = self.describe_export(export_type)
if export_type in TALK_EXPORT_TYPES:
media_url = export['data_requests'][0]['url']
else:
media_url = export['media'][0]['src']
response = requests.get(media_url, stream=True)
response.csv_reader = functools.partial(
csv.reader,
response.iter_lines(decode_unicode=True),
)
response.csv_dictreader = functools.partial(
csv.DictReader,
response.iter_lines(decode_unicode=True),
)
return response
def wait_export(
self,
export_type,
timeout=None,
):
"""
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
"""
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description
def generate_export(self, export_type):
"""
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0]
def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0]
def _export_path(self, export_type):
return '{}/{}_export'.format(self.id, export_type)
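# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of a typical consumer of this mixin.  ``Project`` is the
# panoptes_client resource class that mixes in Exportable; the project id
# 1234 is hypothetical and the call requires network access.
def _example_project_export():
    from panoptes_client import Project

    project = Project(1234)
    # Kick off a fresh export and block until Panoptes reports it finished,
    # then stream the CSV rows as dicts.
    export = project.get_export('classifications', generate=True,
                                wait_timeout=3600)
    return [row for row in export.csv_dictreader()]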
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/exportable.py",
"copies": "1",
"size": "5571",
"license": "apache-2.0",
"hash": 955584282095770400,
"line_mean": 29.6098901099,
"line_max": 79,
"alpha_frac": 0.5683001257,
"autogenerated": false,
"ratio": 4.421428571428572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5489728697128572,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import csv
import itertools as it
import os
from operator import itemgetter
from collections import Iterator
import sys
import datashape
from datashape.discovery import discover, null, string, unpack
from datashape import dshape, Record, Option, Fixed, CType, Tuple
from dynd import nd
from .core import DataDescriptor
from .utils import coerce_record_to_row
from ..utils import nth, nth_list, get
from .. import compatibility
from ..compatibility import map
__all__ = ['CSV']
def has_header(sample):
""" Sample text has a header """
sniffer = csv.Sniffer()
try:
return sniffer.has_header(sample)
except:
return None
def discover_dialect(sample, dialect=None, **kwargs):
""" Discover CSV dialect from string sample
Returns dict
"""
if isinstance(dialect, compatibility._strtypes):
dialect = csv.get_dialect(dialect)
sniffer = csv.Sniffer()
if not dialect:
try:
dialect = sniffer.sniff(sample)
except:
dialect = csv.get_dialect('excel')
# Convert dialect to dictionary
dialect = dict((key, getattr(dialect, key))
for key in dir(dialect) if not key.startswith('_'))
# Update dialect with any keyword arguments passed in
# E.g. allow user to override with delimiter=','
for k, v in kwargs.items():
if k in dialect:
dialect[k] = v
return dialect
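# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of dialect discovery on a small in-memory sample, plus a
# keyword override as described above.
def _example_discover_dialect():
    sample = "name,age\nalice,30\nbob,25\n"

    dialect = discover_dialect(sample)           # sniffed (or 'excel' fallback)
    assert dialect['delimiter'] == ','

    forced = discover_dialect(sample, delimiter=';')   # keyword wins
    assert forced['delimiter'] == ';'
    return dialect, forced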
class CSV(DataDescriptor):
"""
A Blaze data descriptor which exposes a CSV file.
Parameters
----------
path : string
A path string for the CSV file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the CSV file.
dialect : string or csv.Dialect instance
The dialect as understood by the `csv` module in Python standard
library. If not specified, a value is guessed.
header : boolean
Whether the CSV file has a header or not. If not specified a value
is guessed.
"""
immutable = False
deferred = False
persistent = True
appendable = True
remote = False
def __init__(self, path, mode='rt',
schema=None, columns=None, types=None, typehints=None,
dialect=None, header=None, open=open, nrows_discovery=50,
**kwargs):
if 'r' in mode and os.path.isfile(path) is not True:
raise ValueError('CSV file "%s" does not exist' % path)
if not schema and 'w' in mode:
raise ValueError('Please specify schema for writable CSV file')
self.path = path
self.mode = mode
self.open = open
if os.path.exists(path) and mode != 'w':
f = self.open(path)
sample = f.read(16384)
try:
f.close()
except AttributeError:
pass
else:
sample = ''
# Pandas uses sep instead of delimiter.
# Lets support that too
if 'sep' in kwargs:
kwargs['delimiter'] = kwargs['sep']
dialect = discover_dialect(sample, dialect, **kwargs)
assert dialect
if header is None:
header = has_header(sample)
if not schema and 'w' not in mode:
if not types:
with open(self.path) as f:
data = list(it.islice(csv.reader(f, **dialect), 1, nrows_discovery))
types = discover(data)
rowtype = types.subshape[0]
if isinstance(rowtype[0], Tuple):
types = types.subshape[0][0].dshapes
types = [unpack(t) for t in types]
types = [t.ty if isinstance(unpack(t), Option) else t
for t in types]
types = [string if t == null else t
for t in types]
elif (isinstance(rowtype[0], Fixed) and
isinstance(rowtype[1], CType)):
types = int(rowtype[0]) * [rowtype[1]]
else:
ValueError("Could not discover schema from data.\n"
"Please specify schema.")
if not columns:
if header:
with open(self.path) as f:
columns = next(csv.reader([next(f)], **dialect))
else:
columns = ['_%d' % i for i in range(len(types))]
if typehints:
types = [typehints.get(c, t) for c, t in zip(columns, types)]
schema = dshape(Record(list(zip(columns, types))))
self._schema = schema
self.header = header
self.dialect = dialect
def _get_py(self, key):
if isinstance(key, tuple):
assert len(key) == 2
result = self._get_py(key[0])
if isinstance(key[1], list):
getter = itemgetter(*key[1])
else:
getter = itemgetter(key[1])
if isinstance(key[0], (list, slice)):
return map(getter, result)
else:
return getter(result)
f = self.open(self.path)
if self.header:
next(f)
if isinstance(key, compatibility._inttypes):
line = nth(key, f)
result = next(csv.reader([line], **self.dialect))
elif isinstance(key, list):
lines = nth_list(key, f)
result = csv.reader(lines, **self.dialect)
elif isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
result = csv.reader(it.islice(f, start, stop, step),
**self.dialect)
else:
raise IndexError("key '%r' is not valid" % key)
try:
if not isinstance(result, Iterator):
f.close()
except AttributeError:
pass
return result
def _iter(self):
f = self.open(self.path)
if self.header:
next(f) # burn header
for row in csv.reader(f, **self.dialect):
yield row
try:
f.close()
except AttributeError:
pass
def _extend(self, rows):
rows = iter(rows)
if sys.version_info[0] == 3:
f = self.open(self.path, 'a', newline='')
elif sys.version_info[0] == 2:
f = self.open(self.path, 'ab')
row = next(rows)
if isinstance(row, dict):
schema = dshape(self.schema)
row = coerce_record_to_row(schema, row)
rows = (coerce_record_to_row(schema, row) for row in rows)
# Write all rows to file
writer = csv.writer(f, **self.dialect)
writer.writerow(row)
writer.writerows(rows)
try:
f.close()
except AttributeError:
pass
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
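# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example that writes a tiny file and reads it back through the
# descriptor's internal helpers (_iter/_get_py) shown above.  The temporary
# path and the datashape string are hypothetical.
def _example_csv_descriptor():
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), 'points.csv')
    with open(path, 'w') as f:
        f.write("x,y\n1,a\n2,b\n")

    dd = CSV(path, schema='{x: int32, y: string}', header=True)
    assert list(dd._iter()) == [['1', 'a'], ['2', 'b']]
    assert dd._get_py(0) == ['1', 'a']
    dd.remove()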
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/csv.py",
"copies": "1",
"size": "7141",
"license": "bsd-3-clause",
"hash": -1764997209271546400,
"line_mean": 30.5973451327,
"line_max": 88,
"alpha_frac": 0.5353591934,
"autogenerated": false,
"ratio": 4.276047904191617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012365824397704232,
"num_lines": 226
} |
from __future__ import absolute_import, division, print_function
import csv
import itertools as it
import os
import datashape
from dynd import nd
from .core import DataDescriptor
from .utils import coerce_record_to_row
from ..utils import partition_all, nth
from .. import py2help
__all__ = ['CSV']
def has_header(sample):
"""
>>> s = '''
... x,y
... 1,1
... 2,2'''
>>> has_header(s)
True
"""
sniffer = csv.Sniffer()
try:
return sniffer.has_header(sample)
except:
return None
def discover_dialect(sample, dialect=None, **kwargs):
"""
>>> s = '''
... 1,1
... 2,2'''
>>> discover_dialect(s) # doctest: +SKIP
{'escapechar': None,
'skipinitialspace': False,
'quoting': 0,
'delimiter': ',',
'lineterminator': '\r\n',
'quotechar': '"',
'doublequote': False}
"""
if isinstance(dialect, py2help._strtypes):
dialect = csv.get_dialect(dialect)
sniffer = csv.Sniffer()
if not dialect:
try:
dialect = sniffer.sniff(sample)
except:
dialect = csv.get_dialect('excel')
# Convert dialect to dictionary
dialect = dict((key, getattr(dialect, key))
for key in dir(dialect) if not key.startswith('_'))
# Update dialect with any keyword arguments passed in
# E.g. allow user to override with delimiter=','
for k, v in kwargs.items():
if k in dialect:
dialect[k] = v
return dialect
class CSV(DataDescriptor):
"""
A Blaze data descriptor which exposes a CSV file.
Parameters
----------
path : string
A path string for the CSV file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the CSV file.
dialect : string or csv.Dialect instance
The dialect as understood by the `csv` module in Python standard
library. If not specified, a value is guessed.
header : boolean
Whether the CSV file has a header or not. If not specified a value
is guessed.
"""
immutable = False
deferred = False
persistent = True
appendable = True
remote = False
def __init__(self, path, mode='r', schema=None, dshape=None,
dialect=None, header=None, open=open, **kwargs):
        if 'r' in mode and not os.path.isfile(path):
raise ValueError('CSV file "%s" does not exist' % path)
self.path = path
self.mode = mode
self.open = open
if not schema and not dshape:
# TODO: Infer schema
raise ValueError('No schema detected')
if not schema and dshape:
dshape = datashape.dshape(dshape)
if isinstance(dshape[0], datashape.Var):
schema = dshape.subarray(1)
self._schema = schema
if os.path.exists(path) and mode != 'w':
with self.open(path, 'r') as f:
sample = f.read(1024)
else:
sample = ''
dialect = discover_dialect(sample, dialect, **kwargs)
assert dialect
if header is None:
header = has_header(sample)
self.header = header
self.dialect = dialect
def _getitem(self, key):
with self.open(self.path, self.mode) as f:
if self.header:
next(f)
if isinstance(key, py2help._inttypes):
line = nth(key, f)
result = next(csv.reader([line], **self.dialect))
elif isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
result = list(csv.reader(it.islice(f, start, stop, step),
**self.dialect))
else:
raise IndexError("key '%r' is not valid" % key)
return result
def _iter(self):
with self.open(self.path, 'r') as f:
if self.header:
next(f) # burn header
for row in csv.reader(f, **self.dialect):
yield row
def _extend(self, rows):
rows = iter(rows)
with self.open(self.path, self.mode) as f:
if self.header:
next(f)
row = next(rows)
if isinstance(row, dict):
schema = datashape.dshape(self.schema)
row = coerce_record_to_row(schema, row)
rows = (coerce_record_to_row(schema, row) for row in rows)
# Write all rows to file
f.seek(0, os.SEEK_END) # go to the end of the file
writer = csv.writer(f, **self.dialect)
writer.writerow(row)
writer.writerows(rows)
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
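# Illustrative sketch (not part of the original module): discover_dialect and
# has_header above both lean on csv.Sniffer. A minimal standalone version of
# that guessing step; the sample text is invented for the example.
if __name__ == '__main__':
    import csv
    sample = "name,amount\nAlice,100\nBob,200\n"
    sniffer = csv.Sniffer()
    try:
        guessed = sniffer.sniff(sample)
    except csv.Error:
        guessed = csv.get_dialect('excel')   # fall back to the default dialect
    print(guessed.delimiter)                 # ',' for this sample
    print(sniffer.has_header(sample))        # True: header row differs from data rows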
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/csv.py",
"copies": "1",
"size": "4845",
"license": "bsd-3-clause",
"hash": 6584645608469060000,
"line_mean": 27.6686390533,
"line_max": 75,
"alpha_frac": 0.5490196078,
"autogenerated": false,
"ratio": 3.96481178396072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.501383139176072,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import csv
import itertools as it
import os
import datashape
from dynd import nd
from .. import py2help
from .data_descriptor import DDesc, Capabilities
from .dynd_data_descriptor import DyND_DDesc
def open_file(path, mode, has_header):
"""Return a file handler positionated at the first valid line."""
csvfile = open(path, mode=mode)
if has_header:
csvfile.readline()
return csvfile
def csv_descriptor_iter(filename, mode, has_header, schema, dialect={}):
with open_file(filename, mode, has_header) as csvfile:
for row in csv.reader(csvfile, **dialect):
yield DyND_DDesc(nd.array(row, dtype=schema))
def csv_descriptor_iterchunks(filename, mode, has_header, schema,
blen, dialect={}, start=None, stop=None):
rows = []
with open_file(filename, mode, has_header) as csvfile:
for nrow, row in enumerate(csv.reader(csvfile, **dialect)):
if start is not None and nrow < start:
continue
if stop is not None and nrow >= stop:
if rows != []:
# Build the descriptor for the data we have and return
yield DyND_DDesc(nd.array(rows, dtype=schema))
return
rows.append(row)
if nrow % blen == 0:
print("rows:", rows, schema)
yield DyND_DDesc(nd.array(rows, dtype=schema))
rows = []
class CSV_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a CSV file.
Parameters
----------
path : string
A path string for the CSV file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the CSV file.
dialect : string or csv.Dialect instance
The dialect as understood by the `csv` module in Python standard
library. If not specified, a value is guessed.
has_header : boolean
Whether the CSV file has a header or not. If not specified a value
is guessed.
"""
def __init__(self, path, mode='r', schema=None, dialect=None,
has_header=None, **kwargs):
        if not os.path.isfile(path):
raise ValueError('CSV file "%s" does not exist' % path)
self.path = path
self.mode = mode
csvfile = open(path, mode=self.mode)
# Handle Schema
if isinstance(schema, py2help._strtypes):
schema = datashape.dshape(schema)
if isinstance(schema, datashape.DataShape) and len(schema) == 1:
schema = schema[0]
if not isinstance(schema, datashape.Record):
raise TypeError(
'schema cannot be converted into a blaze record dshape')
self.schema = str(schema)
# Handle Dialect
if dialect is None:
# Guess the dialect
sniffer = csv.Sniffer()
try:
dialect = sniffer.sniff(csvfile.read(1024))
            except csv.Error:
# Cannot guess dialect. Assume Excel.
dialect = csv.get_dialect('excel')
csvfile.seek(0)
else:
dialect = csv.get_dialect(dialect)
self.dialect = dict((key, getattr(dialect, key))
for key in dir(dialect) if not key.startswith('_'))
# Update dialect with any keyword arguments passed in
# E.g. allow user to override with delimiter=','
for k, v in kwargs.items():
if k in self.dialect:
self.dialect[k] = v
# Handle Header
if has_header is None:
# Guess whether the file has a header or not
sniffer = csv.Sniffer()
csvfile.seek(0)
sample = csvfile.read(1024)
self.has_header = sniffer.has_header(sample)
else:
self.has_header = has_header
csvfile.close()
@property
def dshape(self):
return datashape.DataShape(datashape.Var(), self.schema)
@property
def capabilities(self):
"""The capabilities for the csv data descriptor."""
return Capabilities(
# csv datadescriptor cannot be updated
immutable = False,
# csv datadescriptors are concrete
deferred = False,
# csv datadescriptor is persistent
persistent = True,
# csv datadescriptor can be appended efficiently
appendable = True,
remote = False,
)
def dynd_arr(self):
        # Position at the beginning of the file
with open_file(self.path, self.mode, self.has_header) as csvfile:
return nd.array(csv.reader(csvfile, **self.dialect), dtype=self.schema)
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
# We don't know how many rows we have
return None
def __getitem__(self, key):
with open_file(self.path, self.mode, self.has_header) as csvfile:
if isinstance(key, py2help._inttypes):
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
else:
raise IndexError("key '%r' is not valid" % key)
read_iter = it.islice(csv.reader(csvfile, **self.dialect),
start, stop, step)
res = nd.array(read_iter, dtype=self.schema)
return DyND_DDesc(res)
def __setitem__(self, key, value):
# CSV files cannot be updated (at least, not efficiently)
raise NotImplementedError
def __iter__(self):
return csv_descriptor_iter(
self.path, self.mode, self.has_header, self.schema, self.dialect)
def append(self, row):
"""Append a row of values (in sequence form)."""
values = nd.array(row, dtype=self.schema) # validate row
with open_file(self.path, self.mode, self.has_header) as csvfile:
csvfile.seek(0, os.SEEK_END) # go to the end of the file
delimiter = self.dialect['delimiter']
csvfile.write(delimiter.join(py2help.unicode(v) for v in row)+'\n')
def iterchunks(self, blen=None, start=None, stop=None):
"""Return chunks of size `blen` (in leading dimension).
Parameters
----------
blen : int
The length, in rows, of the buffers that are returned.
start : int
Where the iterator starts. The default is to start at the
beginning.
stop : int
Where the iterator stops. The default is to stop at the end.
Returns
-------
out : iterable
This iterable returns buffers as DyND arrays,
"""
# Return the iterable
return csv_descriptor_iterchunks(
self.path, self.mode, self.has_header,
self.schema, blen, self.dialect, start, stop)
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
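# Illustrative sketch (not part of the original module): iterchunks above
# buffers blen parsed rows before wrapping them in a DyND array. The
# dynd-free generator below shows the same buffering pattern with plain
# lists; the name chunked_rows is invented for this example.
def chunked_rows(path, blen, dialect=None):
    import csv
    from itertools import islice
    with open(path) as f:
        reader = csv.reader(f, **(dialect or {}))
        while True:
            chunk = list(islice(reader, blen))
            if not chunk:
                return
            yield chunk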
| {
"repo_name": "talumbau/blaze",
"path": "blaze/datadescriptor/csv_data_descriptor.py",
"copies": "3",
"size": "7159",
"license": "bsd-3-clause",
"hash": -1985219243039403300,
"line_mean": 33.9219512195,
"line_max": 83,
"alpha_frac": 0.5759184244,
"autogenerated": false,
"ratio": 4.130986728216965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6206905152616965,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import csv
import re
import datashape
from datashape import discover, Record, Option
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
from collections import Iterator
from toolz import concat, keyfilter, assoc
import pandas
import pandas as pd
import os
import gzip
import bz2
from ..utils import keywords
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..numpy_dtype import dshape_to_pandas
from .pandas import coerce_datetimes
dialect_terms = '''delimiter doublequote escapechar lineterminator quotechar
quoting skipinitialspace strict'''.split()
class CSV(object):
""" Proxy for a CSV file
Parameters
----------
path : str
Path to file on disk
has_header : bool
If the csv file has a header or not
encoding : str (default utf-8)
File encoding
kwargs : other...
Various choices about dialect
"""
def __init__(self, path, has_header='no-input', encoding='utf-8', **kwargs):
self.path = path
if has_header == 'no-input':
if not os.path.exists(path):
self.has_header = True
else:
self.has_header = None
else:
self.has_header = has_header
self.encoding = encoding
self.dialect = dict((d, kwargs[d]) for d in dialect_terms
if d in kwargs)
@append.register(CSV, object)
def append_object_to_csv(c, seq, **kwargs):
append(c, convert(chunks(pd.DataFrame), seq, **kwargs), **kwargs)
return c
compressed_open = {'gz': gzip.open, 'bz2': bz2.BZ2File}
@append.register(CSV, pd.DataFrame)
def append_dataframe_to_csv(c, df, dshape=None, **kwargs):
if not os.path.exists(c.path) or os.path.getsize(c.path) == 0:
has_header = kwargs.pop('has_header', c.has_header)
else:
has_header = False
if has_header is None:
has_header = True
sep = kwargs.get('sep', kwargs.get('delimiter', c.dialect.get('delimiter', ',')))
    encoding = kwargs.get('encoding', c.encoding)
if ext(c.path) in compressed_open:
f = compressed_open[ext(c.path)](c.path, mode='a')
else:
f = c.path
df.to_csv(f, mode='a',
header=has_header,
index=False,
sep=sep,
encoding=encoding)
if hasattr(f, 'flush'):
f.flush()
f.close()
return c
@append.register(CSV, chunks(pd.DataFrame))
def append_iterator_to_csv(c, cs, **kwargs):
for chunk in cs:
append(c, chunk, **kwargs)
return c
def ext(path):
_, e = os.path.splitext(path)
return e.lstrip('.')
@convert.register(pd.DataFrame, CSV, cost=20.0)
def csv_to_DataFrame(c, dshape=None, chunksize=None, nrows=None, **kwargs):
try:
return _csv_to_DataFrame(c, dshape=dshape,
chunksize=chunksize, nrows=nrows, **kwargs)
except StopIteration:
if nrows:
return _csv_to_DataFrame(c, dshape=dshape, chunksize=chunksize, **kwargs)
else:
raise
def _csv_to_DataFrame(c, dshape=None, chunksize=None, **kwargs):
has_header = kwargs.pop('has_header', c.has_header)
if has_header is False:
header = None
elif has_header is True:
header = 0
else:
header = 'infer'
sep = kwargs.pop('sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.get('encoding', c.encoding)
if dshape:
dtypes, parse_dates = dshape_to_pandas(dshape)
if isrecord(dshape.measure):
names = kwargs.get('names', dshape.measure.names)
else:
names = kwargs.get('names')
else:
dtypes = parse_dates = names = None
usecols = kwargs.pop('usecols', None)
if parse_dates and usecols:
parse_dates = [col for col in parse_dates if col in usecols]
compression = kwargs.pop('compression',
{'gz': 'gzip', 'bz2': 'bz2'}.get(ext(c.path)))
# See read_csv docs for header for reasoning
if names:
try:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression, nrows=1)
except StopIteration:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression)
if names and header == 'infer':
if [n.strip() for n in found_names] == [n.strip() for n in names]:
header = 0
elif (all(re.match('^\s*\D\w*\s*$', n) for n in found_names) and
not all(dt == datashape.string for dt in dshape.measure.types)):
header = 0
else:
header = None
kwargs2 = keyfilter(keywords(pandas.read_csv).__contains__, kwargs)
return pandas.read_csv(c.path,
header=header,
sep=sep,
encoding=encoding,
dtype=dtypes,
parse_dates=parse_dates,
names=names,
compression=compression,
chunksize=chunksize,
usecols=usecols,
**kwargs2)
@convert.register(chunks(pd.DataFrame), CSV, cost=10.0)
def CSV_to_chunks_of_dataframes(c, chunksize=2**20, **kwargs):
# Load a small 1000 line DF to start
# This helps with rapid viewing of a large CSV file
first = csv_to_DataFrame(c, nrows=1000, **kwargs)
if len(first) == 1000:
rest = csv_to_DataFrame(c, chunksize=chunksize, skiprows=1000, **kwargs)
else:
rest = []
def _():
yield first
for df in rest:
yield df
return chunks(pd.DataFrame)(_)
@discover.register(CSV)
def discover_csv(c, nrows=1000, **kwargs):
df = csv_to_DataFrame(c, nrows=nrows, **kwargs)
df = coerce_datetimes(df)
if (not list(df.columns) == list(range(len(df.columns)))
and any(re.match('^[-\d_]*$', c) for c in df.columns)):
df = csv_to_DataFrame(c, chunksize=50, has_header=False).get_chunk()
df = coerce_datetimes(df)
df.columns = [str(c).strip() for c in df.columns]
    # Replace np.nan with None. Forces type string rather than float
for col in df.columns:
if df[col].count() == 0:
df[col] = [None] * len(df)
measure = discover(df).measure
# Use Series.notnull to determine Option-ness
measure2 = Record([[name, Option(typ) if (~df[name].notnull()).any() else typ]
for name, typ in zip(measure.names, measure.types)])
return datashape.var * measure2
@resource.register('.+\.(csv|tsv|ssv|data|dat)(\.gz|\.bz)?')
def resource_csv(uri, **kwargs):
return CSV(uri, **kwargs)
from glob import glob
@resource.register('.+\*.+', priority=12)
def resource_glob(uri, **kwargs):
filenames = sorted(glob(uri))
r = resource(filenames[0], **kwargs)
return chunks(type(r))([resource(u, **kwargs) for u in sorted(glob(uri))])
# Alternatively check each time we iterate?
def _():
return (resource(u, **kwargs) for u in glob(uri))
return chunks(type(r))(_)
@convert.register(chunks(pd.DataFrame), chunks(CSV), cost=10.0)
def convert_glob_of_csvs_to_chunks_of_dataframes(csvs, **kwargs):
def _():
return concat(convert(chunks(pd.DataFrame), csv, **kwargs) for csv in csvs)
return chunks(pd.DataFrame)(_)
@dispatch(CSV)
def drop(c):
os.unlink(c.path)
ooc_types.add(CSV)
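# Illustrative sketch (not part of the original module): the chunked
# conversion above first pulls a small DataFrame with nrows= and only then
# streams the remainder with chunksize=. The same pattern in plain pandas;
# the helper name and defaults are invented for this example.
def preview_then_stream(path, preview_rows=1000, chunksize=2**20):
    first = pd.read_csv(path, nrows=preview_rows)
    yield first
    if len(first) == preview_rows:
        # keep the header row, skip the data rows already read
        rest = pd.read_csv(path, skiprows=list(range(1, preview_rows + 1)),
                           chunksize=chunksize)
        for chunk in rest:
            yield chunk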
| {
"repo_name": "mrocklin/into",
"path": "into/backends/csv.py",
"copies": "1",
"size": "7720",
"license": "bsd-3-clause",
"hash": 5095160124585989000,
"line_mean": 30.0040160643,
"line_max": 85,
"alpha_frac": 0.5893782383,
"autogenerated": false,
"ratio": 3.67444074250357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47638189808035697,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ctypes
from .. import llvm_array as lla
import llvm.core as lc
from llvm.core import Type, Function, Module
from ..llvm_array import (void_type, intp_type,
SCALAR, POINTER, array_kinds, check_array,
get_cpp_template, array_type, const_intp, LLArray, orderchar)
from blaze.py2help import PY2
int32_type = Type.int(32)
intp_type = Type.int(8*ctypes.sizeof(ctypes.c_void_p))
int8_p_type = Type.pointer(Type.int(8))
#typedef void (*expr_single_operation_t)(
# char *dst, const char * const *src,
# kernel_data_prefix *extra);
single_ckernel_func_type = Type.function(void_type,
[int8_p_type, Type.pointer(int8_p_type), int8_p_type])
#typedef void (*expr_strided_operation_t)(
# char *dst, intptr_t dst_stride,
# const char * const *src, const intptr_t *src_stride,
# size_t count, kernel_data_prefix *extra);
strided_ckernel_func_type = Type.function(void_type,
[int8_p_type, intp_type,
Type.pointer(int8_p_type), Type.pointer(intp_type),
intp_type, int8_p_type])
def map_llvm_to_ctypes(llvm_type, py_module=None, sname=None, i8p_str=False):
'''
Map an LLVM type to an equivalent ctypes type. py_module is an
optional module that is used for structure wrapping. If
structures are found, the struct definitions will be created in
that module.
'''
kind = llvm_type.kind
if kind == lc.TYPE_INTEGER:
if llvm_type.width < 8:
return ctypes.c_int8
ctype = getattr(ctypes,"c_int"+str(llvm_type.width))
elif kind == lc.TYPE_DOUBLE:
ctype = ctypes.c_double
elif kind == lc.TYPE_FLOAT:
ctype = ctypes.c_float
elif kind == lc.TYPE_VOID:
ctype = None
elif kind == lc.TYPE_POINTER:
pointee = llvm_type.pointee
p_kind = pointee.kind
if p_kind == lc.TYPE_INTEGER:
width = pointee.width
# Special case: char * is mapped to strings
if width == 8 and i8p_str:
ctype = ctypes.c_char_p
else:
ctype = ctypes.POINTER(map_llvm_to_ctypes(pointee, py_module, sname))
# Special case: void * mapped to c_void_p type
elif p_kind == lc.TYPE_VOID:
ctype = ctypes.c_void_p
else:
ctype = ctypes.POINTER(map_llvm_to_ctypes(pointee, py_module, sname))
elif kind == lc.TYPE_ARRAY:
ctype = llvm_type.count * map_llvm_to_ctypes(llvm_type.element, py_module, sname)
elif kind == lc.TYPE_STRUCT:
lookup = True
if llvm_type.is_literal:
if sname:
struct_name = sname
else:
struct_name = 'llvm_struct'
lookup = False
else:
struct_name = llvm_type.name
struct_name = struct_name.replace('.','_')
if PY2:
struct_name = struct_name.encode('ascii')
# If the named type is already known, return it
if py_module and lookup:
struct_type = getattr(py_module, struct_name, None)
else:
struct_type = None
if struct_type and issubclass(struct_type, ctypes.Structure):
return struct_type
# If there is an object with the name of the structure already
# present and it has the field names specified, use those names
# to help out
if hasattr(struct_type, '_fields_'):
names = struct_type._fields_
else:
names = [ "e"+str(n) for n in range(llvm_type.element_count) ]
# Create a class definition for the type. It is critical that this
        # take place before the handling of members to avoid issues with
# self-referential data structures
if py_module and lookup:
type_dict = { '__module__' : py_module.__name__}
else:
type_dict = {}
ctype = type(ctypes.Structure)(struct_name, (ctypes.Structure,),
type_dict)
if py_module and lookup:
setattr(py_module, struct_name, ctype)
# Resolve the structure fields
fields = [ (name, map_llvm_to_ctypes(elem, py_module))
for name, elem in zip(names, llvm_type.elements) ]
# Set the fields member of the type last. The order is critical
# to deal with self-referential structures.
setattr(ctype, '_fields_', fields)
else:
raise TypeError("Unknown type %s" % kind)
return ctype
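# Illustrative sketch (not part of the original module): for LLVM struct types
# the function above creates the ctypes.Structure subclass first and attaches
# _fields_ afterwards, which is what allows self-referential layouts. The
# standalone Node example below is invented to show that two-step pattern.
if __name__ == '__main__':
    Node = type(ctypes.Structure)('Node', (ctypes.Structure,), {})
    # _fields_ may point back at Node only because the class already exists
    Node._fields_ = [('value', ctypes.c_int),
                     ('next', ctypes.POINTER(Node))]
    head = Node(1, ctypes.POINTER(Node)())
    print(head.value)        # 1
    print(bool(head.next))   # False: NULL next pointer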
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/bkernel/llutil.py",
"copies": "7",
"size": "4665",
"license": "bsd-3-clause",
"hash": -2294513906888643000,
"line_mean": 36.6209677419,
"line_max": 89,
"alpha_frac": 0.5852090032,
"autogenerated": false,
"ratio": 3.6906645569620253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7775873560162025,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ctypes
import llvm.core as lc
from llvm.core import Type
from ..llvm_array import (void_type, intp_type)
from blaze.py2help import PY2
int32_type = Type.int(32)
intp_type = Type.int(8*ctypes.sizeof(ctypes.c_void_p))
int8_p_type = Type.pointer(Type.int(8))
#typedef void (*expr_single_operation_t)(
# char *dst, const char * const *src,
# kernel_data_prefix *extra);
single_ckernel_func_type = Type.function(void_type,
[int8_p_type, Type.pointer(int8_p_type), int8_p_type])
#typedef void (*expr_strided_operation_t)(
# char *dst, intptr_t dst_stride,
# const char * const *src, const intptr_t *src_stride,
# size_t count, kernel_data_prefix *extra);
strided_ckernel_func_type = Type.function(void_type,
[int8_p_type, intp_type,
Type.pointer(int8_p_type), Type.pointer(intp_type),
intp_type, int8_p_type])
def map_llvm_to_ctypes(llvm_type, py_module=None, sname=None, i8p_str=False):
'''
Map an LLVM type to an equivalent ctypes type. py_module is an
optional module that is used for structure wrapping. If
structures are found, the struct definitions will be created in
that module.
'''
kind = llvm_type.kind
if kind == lc.TYPE_INTEGER:
if llvm_type.width < 8:
return ctypes.c_int8
ctype = getattr(ctypes,"c_int"+str(llvm_type.width))
elif kind == lc.TYPE_DOUBLE:
ctype = ctypes.c_double
elif kind == lc.TYPE_FLOAT:
ctype = ctypes.c_float
elif kind == lc.TYPE_VOID:
ctype = None
elif kind == lc.TYPE_POINTER:
pointee = llvm_type.pointee
p_kind = pointee.kind
if p_kind == lc.TYPE_INTEGER:
width = pointee.width
# Special case: char * is mapped to strings
if width == 8 and i8p_str:
ctype = ctypes.c_char_p
else:
ctype = ctypes.POINTER(map_llvm_to_ctypes(pointee, py_module, sname))
# Special case: void * mapped to c_void_p type
elif p_kind == lc.TYPE_VOID:
ctype = ctypes.c_void_p
else:
ctype = ctypes.POINTER(map_llvm_to_ctypes(pointee, py_module, sname))
elif kind == lc.TYPE_ARRAY:
ctype = llvm_type.count * map_llvm_to_ctypes(llvm_type.element, py_module, sname)
elif kind == lc.TYPE_STRUCT:
lookup = True
if llvm_type.is_literal:
if sname:
struct_name = sname
else:
struct_name = 'llvm_struct'
lookup = False
else:
struct_name = llvm_type.name
struct_name = struct_name.replace('.','_')
if PY2:
struct_name = struct_name.encode('ascii')
# If the named type is already known, return it
if py_module and lookup:
struct_type = getattr(py_module, struct_name, None)
else:
struct_type = None
if struct_type and issubclass(struct_type, ctypes.Structure):
return struct_type
# If there is an object with the name of the structure already
# present and it has the field names specified, use those names
# to help out
if hasattr(struct_type, '_fields_'):
names = struct_type._fields_
else:
names = [ "e"+str(n) for n in range(llvm_type.element_count) ]
# Create a class definition for the type. It is critical that this
        # take place before the handling of members to avoid issues with
# self-referential data structures
if py_module and lookup:
type_dict = { '__module__' : py_module.__name__}
else:
type_dict = {}
ctype = type(ctypes.Structure)(struct_name, (ctypes.Structure,),
type_dict)
if py_module and lookup:
setattr(py_module, struct_name, ctype)
# Resolve the structure fields
fields = [ (name, map_llvm_to_ctypes(elem, py_module))
for name, elem in zip(names, llvm_type.elements) ]
# Set the fields member of the type last. The order is critical
# to deal with self-referential structures.
setattr(ctype, '_fields_', fields)
else:
raise TypeError("Unknown type %s" % kind)
return ctype
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/compute/bkernel/llutil.py",
"copies": "2",
"size": "4477",
"license": "bsd-3-clause",
"hash": -4093268613956062700,
"line_mean": 36,
"line_max": 89,
"alpha_frac": 0.5832030377,
"autogenerated": false,
"ratio": 3.6969446738232867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021279527639947097,
"num_lines": 121
} |
from __future__ import absolute_import, division, print_function
import ctypes
import operator
import llvm.core as lc
from llvm.core import Type, Function, Module
import llvm.ee as le
from llvm.passes import build_pass_managers
from .. import llvm_array as lla
from blaze.py2help import izip, _strtypes, c_ssize_t, PY2
from ..llvm_array import (void_type, intp_type,
array_kinds, check_array,
get_cpp_template, array_type, const_intp, LLArray, orderchar)
from .llutil import (int32_type, int8_p_type, single_ckernel_func_type,
strided_ckernel_func_type, map_llvm_to_ctypes)
from ..ckernel import JITCKernelData, wrap_ckernel_func
from dynd import nd, ndt, _lowlevel
def args_to_kernel_data_struct(kinds, argtypes):
# Build up the kernel data structure. Currently, this means
# adding a shape field for each array argument. First comes
# the kernel data prefix with a spot for the 'owner' reference added.
input_field_indices = []
kernel_data_fields = [Type.struct([int8_p_type]*3)]
kernel_data_ctypes_fields = [('base', JITCKernelData)]
for i, (kind, a) in enumerate(izip(kinds, argtypes)):
if isinstance(kind, tuple):
if kind[0] != lla.C_CONTIGUOUS:
raise ValueError('only support C contiguous array presently')
input_field_indices.append(len(kernel_data_fields))
kernel_data_fields.append(Type.array(
intp_type, len(bek.dshapes[i])-1))
kernel_data_ctypes_fields.append(('operand_%d' % i,
c_ssize_t * (len(bek.dshapes[i])-1)))
elif kind in [lla.SCALAR, lla.POINTER]:
input_field_indices.append(None)
else:
raise TypeError(("unbound_single_ckernel codegen doesn't " +
"support the parameter kind %r yet") % (k,))
# Make an LLVM and ctypes type for the extra data pointer.
kernel_data_llvmtype = Type.struct(kernel_data_fields)
class kernel_data_ctypestype(ctypes.Structure):
_fields_ = kernel_data_ctypes_fields
return (kernel_data_llvmtype, kernel_data_ctypestype)
def build_llvm_arg_ptr(builder, raw_ptr_arg, dshape, kind, argtype):
if kind == lla.SCALAR:
src_ptr = builder.bitcast(raw_ptr_arg,
Type.pointer(argtype))
src_val = builder.load(src_ptr)
return src_val
elif kind == lla.POINTER:
src_ptr = builder.bitcast(raw_ptr_arg,
Type.pointer(argtype))
return src_ptr
elif isinstance(kind, tuple):
src_ptr = builder.bitcast(raw_ptr_arg,
Type.pointer(kind[2]))
# First get the shape of this parameter. This will
# be a combination of Fixed and TypeVar (Var unsupported
# here for now)
        shape = dshape[:-1]
# Get the llvm array
arr_var = builder.alloca(argtype.pointee)
builder.store(src_ptr,
builder.gep(arr_var,
(lc.Constant.int(int32_type, 0),
lc.Constant.int(int32_type, 0))))
for j, sz in enumerate(shape):
if isinstance(sz, Fixed):
# If the shape is already known at JIT compile time,
# insert the constant
shape_el_ptr = builder.gep(arr_var,
(lc.Constant.int(int32_type, 0),
lc.Constant.int(int32_type, 1),
lc.Constant.int(intp_type, j)))
builder.store(lc.Constant.int(intp_type,
operator.index(sz)),
shape_el_ptr)
elif isinstance(sz, TypeVar):
# TypeVar types are only known when the kernel is bound,
# so copy it from the extra data pointer
sz_from_extra_ptr = builder.gep(extra_struct,
(lc.Constant.int(int32_type, 0),
lc.Constant.int(int32_type,
input_field_indices[i]),
lc.Constant.int(intp_type, j)))
sz_from_extra = builder.load(sz_from_extra_ptr)
shape_el_ptr = builder.gep(arr_var,
(lc.Constant.int(int32_type, 0),
lc.Constant.int(int32_type, 1),
lc.Constant.int(intp_type, j)))
builder.store(sz_from_extra, shape_el_ptr)
else:
raise TypeError(("unbound_single_ckernel codegen doesn't " +
"support dimension type %r") % type(sz))
return arr_var
def build_llvm_src_ptrs(builder, src_ptr_arr_arg, dshapes, kinds, argtypes):
args = []
for i, (dshape, kind, argtype) in enumerate(izip(dshapes, kinds, argtypes)):
raw_ptr_arg = builder.load(builder.gep(src_ptr_arr_arg,
(lc.Constant.int(intp_type, i),)))
arg = build_llvm_arg_ptr(builder, raw_ptr_arg, dshape, kind, argtype)
args.append(arg)
return args
def jit_compile_ckernel_deferred(bek, out_dshape):
"""
Creates a ckernel_deferred from the blaze element kernel.
Actual JIT compilation is done at instantiation.
Parameters
----------
bek : BlazeElementKernel
The blaze kernel.
"""
# Create a deferred ckernel via a closure
def instantiate_ckernel(out_ckb, ckb_offset, types, meta, kerntype):
out_ckb = _lowlevel.CKernelBuilder(out_ckb)
strided = (kerntype == 'strided')
# TODO cache the compiled kernels
module, lfunc = create_ckernel_interface(bek, strided)
optimize(module, lfunc)
ee, func_ptr = get_pointer(module, lfunc)
# TODO: Something like the following needs to be reenabled
# to handle array types
# Build llvm and ctypes structures for the kernel data, using
# the argument types.
##kd_llvmtype, kd_ctypestype = args_to_kernel_data_struct(bek.kinds, bek.argtypes)
# Cast the extra pointer to the right llvm type
#extra_struct = builder.bitcast(extra_ptr_arg,
# Type.pointer(kd_llvmtype))
# Create a function which copies the shape from data
# descriptors to the extra data struct.
##if len(kd_ctypestype._fields_) == 1:
## # If there were no extra data fields, it's a no-op function
## def bind_func(estruct, dst_dd, src_dd_list):
## pass
##else:
## def bind_func(estruct, dst_dd, src_dd_list):
## for i, (ds, dd) in enumerate(
## izip(bek.dshapes, src_dd_list + [dst_dd])):
## shape = [operator.index(dim)
## for dim in dd.dshape[-len(ds):-1]]
## cshape = getattr(estruct, 'operand_%d' % i)
## for j, dim_size in enumerate(shape):
## cshape[j] = dim_size
if strided:
optype = _lowlevel.ExprStridedOperation
else:
optype = _lowlevel.ExprSingleOperation
return wrap_ckernel_func(out_ckb, ckb_offset, optype(func_ptr),
(ee, func_ptr))
# Wrap the function in a ckernel_deferred
in_dshapes = list(bek.dshapes)
# HACK: sometimes the return type is there, sometimes not,
# exclude it unconditionally.
in_dshapes = in_dshapes[:len(bek.kinds)-1]
out_type = out_dshape.measure
in_types = [in_dshape.measure for in_dshape in in_dshapes]
return _lowlevel.ckernel_deferred_from_pyfunc(instantiate_ckernel,
[ndt.type(str(t)) for t in [out_type] + in_types])
def create_ckernel_interface(bek, strided):
"""Create a function wrapper with a CKernel interface according to
`strided`.
Parameters
----------
bek : BlazeElementKernel
The blaze kernel to compile into an unbound single ckernel.
strided : bool
If true, returns an ExprStridedOperation, otherwise an
ExprSingleOperation.
"""
# TODO: Decouple this from BlazeElementKernel
inarg_count = len(bek.kinds)-1
module = bek.module.clone()
if strided:
ck_func_name = bek.func.name +"_strided_ckernel"
ck_func = Function.new(module, strided_ckernel_func_type,
name=ck_func_name)
else:
ck_func_name = bek.func.name +"_single_ckernel"
ck_func = Function.new(module, single_ckernel_func_type,
name=ck_func_name)
entry_block = ck_func.append_basic_block('entry')
builder = lc.Builder.new(entry_block)
if strided:
dst_ptr_arg, dst_stride_arg, \
src_ptr_arr_arg, src_stride_arr_arg, \
count_arg, extra_ptr_arg = ck_func.args
dst_stride_arg.name = 'dst_stride'
src_stride_arr_arg.name = 'src_strides'
count_arg.name = 'count'
else:
dst_ptr_arg, src_ptr_arr_arg, extra_ptr_arg = ck_func.args
dst_ptr_arg.name = 'dst_ptr'
src_ptr_arr_arg.name = 'src_ptrs'
extra_ptr_arg.name = 'extra_ptr'
if strided:
# Allocate an array of pointer counters for the
# strided loop
src_ptr_arr_tmp = builder.alloca_array(int8_p_type,
lc.Constant.int(int32_type, inarg_count), 'src_ptr_arr')
# Copy the pointers
for i in range(inarg_count):
builder.store(builder.load(builder.gep(src_ptr_arr_arg,
(lc.Constant.int(int32_type, i),))),
builder.gep(src_ptr_arr_tmp,
(lc.Constant.int(int32_type, i),)))
# Get all the src strides
src_stride_vals = [builder.load(builder.gep(src_stride_arr_arg,
(lc.Constant.int(int32_type, i),)))
for i in range(inarg_count)]
# Replace src_ptr_arr_arg with this local variable
src_ptr_arr_arg = src_ptr_arr_tmp
# Initialize some more basic blocks for the strided loop
looptest_block = ck_func.append_basic_block('looptest')
loopbody_block = ck_func.append_basic_block('loopbody')
end_block = ck_func.append_basic_block('finish')
# Finish the entry block by branching
# to the looptest block
builder.branch(looptest_block)
# The looptest block continues the loop while counter != 0
builder.position_at_end(looptest_block)
counter_phi = builder.phi(count_arg.type)
counter_phi.add_incoming(count_arg, entry_block)
dst_ptr_phi = builder.phi(dst_ptr_arg.type)
dst_ptr_phi.add_incoming(dst_ptr_arg, entry_block)
dst_ptr_arg = dst_ptr_phi
kzero = lc.Constant.int(count_arg.type, 0)
pred = builder.icmp(lc.ICMP_NE, counter_phi, kzero)
builder.cbranch(pred, loopbody_block, end_block)
# The loopbody block decrements the counter, and executes
# one kernel iteration
builder.position_at_end(loopbody_block)
kone = lc.Constant.int(counter_phi.type, 1)
counter_dec = builder.sub(counter_phi, kone)
counter_phi.add_incoming(counter_dec, loopbody_block)
# Convert the src pointer args to the
# appropriate kinds for the llvm call
args = build_llvm_src_ptrs(builder, src_ptr_arr_arg,
bek.dshapes, bek.kinds[:-1], bek.argtypes)
# Call the function and store in the dst
kind = bek.kinds[-1]
func = module.get_function_named(bek.func.name)
if kind == lla.SCALAR:
dst_ptr = builder.bitcast(dst_ptr_arg,
Type.pointer(bek.return_type))
dst_val = builder.call(func, args)
builder.store(dst_val, dst_ptr)
else:
dst_ptr = build_llvm_arg_ptr(builder, dst_ptr_arg,
bek.dshapes[-1], kind, bek.argtypes[-1])
builder.call(func, args + [dst_ptr])
if strided:
# Finish the loopbody block by incrementing all the pointers
# and branching to the looptest block
dst_ptr_inc = builder.gep(dst_ptr_arg, (dst_stride_arg,))
dst_ptr_phi.add_incoming(dst_ptr_inc, loopbody_block)
# Increment the src pointers
for i in range(inarg_count):
src_ptr_val = builder.load(builder.gep(src_ptr_arr_tmp,
(lc.Constant.int(int32_type, i),)))
src_ptr_inc = builder.gep(src_ptr_val, (src_stride_vals[i],))
builder.store(src_ptr_inc,
builder.gep(src_ptr_arr_tmp,
(lc.Constant.int(int32_type, i),)))
builder.branch(looptest_block)
# The end block just returns
builder.position_at_end(end_block)
builder.ret_void()
#print("Function before optimization passes:")
#print(ck_func)
#module.verify()
return module, ck_func
def optimize(module, lfunc):
tm = le.TargetMachine.new(opt=3, cm=le.CM_JITDEFAULT, features='')
pms = build_pass_managers(tm, opt=3, fpm=False,
vectorize=True, loop_vectorize=True)
pms.pm.run(module)
#print("Function after optimization passes:")
#print(ck_func)
def get_pointer(module, lfunc):
# DEBUGGING: Verify the module.
#module.verify()
# TODO: Cache the EE - the interplay with the func_ptr
# was broken, so just avoiding caching for now
# FIXME: Temporarily disabling AVX, because of misdetection
# in linux VMs. Some code is in llvmpy's workarounds
# submodule related to this.
ee = le.EngineBuilder.new(module).mattrs("-avx").create()
func_ptr = ee.get_pointer_to_function(lfunc)
return ee, func_ptr
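# Illustrative sketch (not part of the original module): the strided ckernel
# emitted above is a loop that applies a scalar kernel count times, advancing
# the destination and each source pointer by its stride on every iteration.
# The pure-Python analogue below uses element strides on flat lists; all
# names here are invented for the example.
def strided_apply(kernel, dst, dst_stride, srcs, src_strides, count):
    dst_i = 0
    src_is = [0] * len(srcs)
    for _ in range(count):
        dst[dst_i] = kernel(*[src[i] for src, i in zip(srcs, src_is)])
        dst_i += dst_stride
        src_is = [i + s for i, s in zip(src_is, src_strides)]
if __name__ == '__main__':
    out = [0] * 4
    strided_apply(lambda a, b: a + b, out, 1,
                  [[1, 2, 3, 4], [10, 20, 30, 40]], [1, 1], 4)
    print(out)   # [11, 22, 33, 44]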
| {
"repo_name": "AbhiAgarwal/blaze",
"path": "blaze/compute/bkernel/jit_ckernel.py",
"copies": "7",
"size": "13944",
"license": "bsd-3-clause",
"hash": 586995942842595800,
"line_mean": 42.1702786378,
"line_max": 90,
"alpha_frac": 0.5808232932,
"autogenerated": false,
"ratio": 3.61993769470405,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003816619982900457,
"num_lines": 323
} |
from __future__ import absolute_import, division, print_function
import ctypes
import unittest
import datashape
from datashape import dshape, error
from datashape.py2help import skip
class TestDataShapeCreation(unittest.TestCase):
def test_raise_on_bad_input(self):
# Make sure it raises exceptions on a few nonsense inputs
self.assertRaises(TypeError, dshape, None)
self.assertRaises(TypeError, dshape, lambda x: x+1)
# Check issue 11
self.assertRaises(datashape.DataShapeSyntaxError, dshape, '1 *')
self.assertRaises(datashape.DataShapeSyntaxError, dshape, '1,')
def test_reserved_future_bigint(self):
# The "bigint" datashape is reserved for a future big integer type
self.assertRaises(Exception, dshape, "bigint")
def test_atom_shapes(self):
self.assertEqual(dshape('bool'), dshape(datashape.bool_))
self.assertEqual(dshape('int8'), dshape(datashape.int8))
self.assertEqual(dshape('int16'), dshape(datashape.int16))
self.assertEqual(dshape('int32'), dshape(datashape.int32))
self.assertEqual(dshape('int64'), dshape(datashape.int64))
self.assertEqual(dshape('uint8'), dshape(datashape.uint8))
self.assertEqual(dshape('uint16'), dshape(datashape.uint16))
self.assertEqual(dshape('uint32'), dshape(datashape.uint32))
self.assertEqual(dshape('uint64'), dshape(datashape.uint64))
self.assertEqual(dshape('float32'), dshape(datashape.float32))
self.assertEqual(dshape('float64'), dshape(datashape.float64))
self.assertEqual(dshape('complex64'), dshape(datashape.complex64))
self.assertEqual(dshape('complex128'), dshape(datashape.complex128))
self.assertEqual(dshape('complex64'), dshape('complex[float32]'))
self.assertEqual(dshape('complex128'), dshape('complex[float64]'))
self.assertEqual(dshape("string"), dshape(datashape.string))
self.assertEqual(dshape("json"), dshape(datashape.json))
if ctypes.sizeof(ctypes.c_void_p) == 4:
self.assertEqual(dshape('intptr'), dshape(datashape.int32))
self.assertEqual(dshape('uintptr'), dshape(datashape.uint32))
else:
self.assertEqual(dshape('intptr'), dshape(datashape.int64))
self.assertEqual(dshape('uintptr'), dshape(datashape.uint64))
self.assertEqual(dshape("date"), dshape(datashape.date_))
self.assertEqual(dshape("time"), dshape(datashape.time_))
self.assertEqual(dshape("datetime"), dshape(datashape.datetime_))
def test_atom_shape_errors(self):
self.assertRaises(error.DataShapeSyntaxError, dshape, 'boot')
self.assertRaises(error.DataShapeSyntaxError, dshape, 'int33')
self.assertRaises(error.DataShapeSyntaxError, dshape, '12')
self.assertRaises(error.DataShapeSyntaxError, dshape, 'var')
@skip('implements has not been implemented in the new parser')
def test_constraints_error(self):
self.assertRaises(error.DataShapeTypeError, dshape,
'A : integral * B : numeric')
def test_ellipsis_error(self):
self.assertRaises(error.DataShapeSyntaxError, dshape, 'T * ...')
self.assertRaises(error.DataShapeSyntaxError, dshape, 'T * S...')
@skip('type decl has been removed in the new parser')
def test_type_decl(self):
self.assertRaises(error.DataShapeTypeError, dshape, 'type X T = 3, T')
self.assertEqual(dshape('3, int32'), dshape('type X = 3, int32'))
def test_string_atom(self):
self.assertEqual(dshape('string'), dshape("string['U8']"))
self.assertEqual(dshape("string['ascii']")[0].encoding, 'A')
self.assertEqual(dshape("string['A']")[0].encoding, 'A')
self.assertEqual(dshape("string['utf-8']")[0].encoding, 'U8')
self.assertEqual(dshape("string['U8']")[0].encoding, 'U8')
self.assertEqual(dshape("string['utf-16']")[0].encoding, 'U16')
self.assertEqual(dshape("string['U16']")[0].encoding, 'U16')
self.assertEqual(dshape("string['utf-32']")[0].encoding, 'U32')
self.assertEqual(dshape("string['U32']")[0].encoding, 'U32')
def test_time(self):
self.assertEqual(dshape('time')[0].tz, None)
self.assertEqual(dshape('time[tz="UTC"]')[0].tz, 'UTC')
self.assertEqual(dshape('time[tz="America/Vancouver"]')[0].tz,
'America/Vancouver')
def test_datetime(self):
self.assertEqual(dshape('datetime')[0].tz, None)
self.assertEqual(dshape('datetime[tz="UTC"]')[0].tz, 'UTC')
self.assertEqual(dshape('datetime[tz="America/Vancouver"]')[0].tz,
'America/Vancouver')
def test_units(self):
self.assertEqual(dshape('units["second"]')[0].unit, 'second')
self.assertEqual(dshape('units["second"]')[0].tp, dshape('float64'))
self.assertEqual(dshape('units["second", int32]')[0].unit, 'second')
self.assertEqual(dshape('units["second", int32]')[0].tp,
dshape('int32'))
def test_struct_of_array(self):
self.assertEqual(str(dshape('5 * int32')), '5 * int32')
self.assertEqual(str(dshape('{field: 5 * int32}')),
'{ field : 5 * int32 }')
self.assertEqual(str(dshape('{field: M * int32}')),
'{ field : M * int32 }')
def test_ragged_array(self):
self.assertTrue(isinstance(dshape('3 * var * int32')[1], datashape.Var))
def test_from_numpy_fields(self):
import numpy as np
dt = np.dtype('i4,i8,f8')
ds = datashape.from_numpy((), dt)
self.assertEqual(ds.names, ['f0', 'f1', 'f2'])
self.assertEqual(ds.types,
[dshape('int32'), dshape('int64'), dshape('float64')])
def test_to_numpy_fields(self):
import numpy as np
ds = datashape.dshape('{x: int32, y: float32}')
shape, dt = datashape.to_numpy(ds)
self.assertEqual(shape, ())
self.assertEqual(dt, np.dtype([('x', 'int32'), ('y', 'float32')]))
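def _roundtrip_example():
    """Illustrative sketch (not part of the original tests): the numpy round
    trip exercised in test_to_numpy_fields, written as a plain function so it
    can be run interactively. The function name is invented here."""
    import numpy as np
    ds = datashape.dshape('{x: int32, y: float32}')
    shape, dt = datashape.to_numpy(ds)
    assert shape == ()
    assert dt == np.dtype([('x', 'int32'), ('y', 'float32')])
    return shape, dt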
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "talumbau/datashape",
"path": "datashape/tests/test_creation.py",
"copies": "1",
"size": "6153",
"license": "bsd-2-clause",
"hash": 6901419608142554000,
"line_mean": 46.6976744186,
"line_max": 80,
"alpha_frac": 0.6330245409,
"autogenerated": false,
"ratio": 3.498010233086981,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9629120715225377,
"avg_score": 0.0003828117523207962,
"num_lines": 129
} |
from __future__ import absolute_import, division, print_function
import ctypes
from dynd import ndt, _lowlevel
import datashape
from .dynd_data_descriptor import DyNDDataDescriptor
def data_descriptor_from_ctypes(cdata, writable):
"""
Parameters
----------
cdata : ctypes data instance
The ctypes data object which owns the data.
writable : bool
        Should be true if the data is writable, false
if it's read-only.
"""
ds = datashape.from_ctypes(type(cdata))
access = "readwrite" if writable else "readonly"
dyndarr = _lowlevel.array_from_ptr(ndt.type(str(ds)),
ctypes.addressof(cdata), cdata,
access)
return DyNDDataDescriptor(dyndarr)
def data_descriptor_from_cffi(ffi, cdata, writable):
"""
Parameters
----------
ffi : cffi.FFI
The cffi namespace which contains the cdata.
cdata : cffi.CData
The cffi data object which owns the data.
writable : bool
        Should be true if the data is writable, false
if it's read-only.
"""
if not isinstance(cdata, ffi.CData):
raise TypeError('object is not a cffi.CData object, has type %s' %
type(cdata))
owner = (ffi, cdata)
# Get the raw pointer out of the cdata as an integer
ptr = int(ffi.cast('uintptr_t', ffi.cast('char *', cdata)))
ds = datashape.from_cffi(ffi, ffi.typeof(cdata))
if (isinstance(ds, datashape.DataShape) and
isinstance(ds[0], datashape.TypeVar)):
# If the outermost dimension is an array without fixed
# size, get its size from the data
ds = datashape.DataShape(*(datashape.Fixed(len(cdata)),) + ds[1:])
access = "readwrite" if writable else "readonly"
dyndarr = _lowlevel.array_from_ptr(ndt.type(str(ds)), ptr, owner, access)
return DyNDDataDescriptor(dyndarr)
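# Illustrative sketch (not part of the original module): the ctypes path above
# hands dynd the raw address of the buffer via ctypes.addressof while keeping
# cdata alive as the owner. The address handling on its own, with names
# invented for the example:
if __name__ == '__main__':
    buf = (ctypes.c_double * 4)(1.0, 2.0, 3.0, 4.0)
    ptr = ctypes.addressof(buf)              # integer address handed to dynd
    view = (ctypes.c_double * 4).from_address(ptr)
    print(list(view))                        # [1.0, 2.0, 3.0, 4.0]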
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/datadescriptor/membuf_data_descriptor.py",
"copies": "10",
"size": "1892",
"license": "bsd-3-clause",
"hash": -8144940776442677000,
"line_mean": 32.7857142857,
"line_max": 77,
"alpha_frac": 0.6358350951,
"autogenerated": false,
"ratio": 3.6525096525096523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01820054945054945,
"num_lines": 56
} |
from __future__ import absolute_import, division, print_function
import ctypes
from dynd import ndt, _lowlevel
import datashape
from .dynd_data_descriptor import DyND_DDesc
def data_descriptor_from_ctypes(cdata, writable):
"""
Parameters
----------
cdata : ctypes data instance
The ctypes data object which owns the data.
writable : bool
        Should be true if the data is writable, false
if it's read-only.
"""
ds = datashape.from_ctypes(type(cdata))
access = "readwrite" if writable else "readonly"
dyndarr = _lowlevel.array_from_ptr(ndt.type(str(ds)),
ctypes.addressof(cdata), cdata,
access)
return DyND_DDesc(dyndarr)
def data_descriptor_from_cffi(ffi, cdata, writable):
"""
Parameters
----------
ffi : cffi.FFI
The cffi namespace which contains the cdata.
cdata : cffi.CData
The cffi data object which owns the data.
writable : bool
        Should be true if the data is writable, false
if it's read-only.
"""
if not isinstance(cdata, ffi.CData):
raise TypeError('object is not a cffi.CData object, has type %s' %
type(cdata))
owner = (ffi, cdata)
# Get the raw pointer out of the cdata as an integer
ptr = int(ffi.cast('uintptr_t', ffi.cast('char *', cdata)))
ds = datashape.from_cffi(ffi, ffi.typeof(cdata))
if (isinstance(ds, datashape.DataShape) and
isinstance(ds[0], datashape.TypeVar)):
# If the outermost dimension is an array without fixed
# size, get its size from the data
ds = datashape.DataShape(*(datashape.Fixed(len(cdata)),) + ds[1:])
access = "readwrite" if writable else "readonly"
dyndarr = _lowlevel.array_from_ptr(ndt.type(str(ds)), ptr, owner, access)
return DyND_DDesc(dyndarr)
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/datadescriptor/membuf_data_descriptor.py",
"copies": "2",
"size": "1868",
"license": "bsd-3-clause",
"hash": 4605142460420198000,
"line_mean": 32.3571428571,
"line_max": 77,
"alpha_frac": 0.6295503212,
"autogenerated": false,
"ratio": 3.5854126679462572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214962989146257,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import curses
import threading
import time
import workflows.transport
from workflows.services.common_service import CommonService
try: # Python3 compatibility
basestring = basestring
except NameError:
basestring = (str, bytes)
class Monitor(): # pragma: no cover
'''A sample implementation of a status monitor showing all running services.
To use this example class, pass in a transport object and call the run()
method.'''
shutdown = False
'''Set to true to end the main loop and shut down the service monitor.'''
cards = {}
'''Register card shown for seen services'''
border_chars = ()
'''Characters used for frame borders.'''
border_chars_text = ('|', '|', '=', '=', '/', '\\', '\\', '/')
'''Example alternative set of frame border characters.'''
def __init__(self, transport=None):
'''Set up monitor and connect to the network transport layer'''
if transport is None or isinstance(transport, basestring):
self._transport = workflows.transport.lookup(transport)()
else:
self._transport = transport()
assert self._transport.connect(), "Could not connect to transport layer"
self._lock = threading.RLock()
self._node_status = {}
self.message_box = None
self._transport.subscribe_broadcast('transient.status', self.update_status, retroactive=True)
def update_status(self, header, message):
'''Process incoming status message. Acquire lock for status dictionary before updating.'''
with self._lock:
if self.message_box:
self.message_box.erase()
self.message_box.move(0,0)
for n, field in enumerate(header):
if n == 0:
self.message_box.addstr(field + ":", curses.color_pair(1))
else:
self.message_box.addstr(", " + field + ":", curses.color_pair(1))
self.message_box.addstr(header[field])
self.message_box.addstr(": ", curses.color_pair(1))
self.message_box.addstr(str(message), curses.color_pair(2) + curses.A_BOLD)
self.message_box.refresh()
if message['host'] not in self._node_status or \
int(header['timestamp']) >= self._node_status[message['host']]['last_seen']:
self._node_status[message['host']] = message
self._node_status[message['host']]['last_seen'] = int(header['timestamp'])
def run(self):
'''A wrapper for the real _run() function to cleanly enable/disable the
curses environment.'''
curses.wrapper(self._run)
def _boxwin(self, height, width, row, column, title=None, title_x=7, color_pair=None):
with self._lock:
box = curses.newwin(height, width, row, column)
box.clear()
if color_pair:
box.attron(curses.color_pair(color_pair))
box.border(*self.border_chars)
if title:
box.addstr(0, title_x, " " + title + " ")
if color_pair:
box.attroff(curses.color_pair(color_pair))
box.noutrefresh()
return curses.newwin(height - 2, width - 2, row + 1, column + 1)
def _redraw_screen(self, stdscr):
'''Redraw screen. This could be to initialize, or to redraw after resizing.'''
with self._lock:
stdscr.clear()
stdscr.addstr(0, 0, "workflows service monitor -- quit with Ctrl+C", curses.A_BOLD)
stdscr.refresh()
self.message_box = self._boxwin(5, curses.COLS, 2, 0, title='last seen message', color_pair=1)
self.message_box.scrollok(True)
self.cards = []
def _get_card(self, number):
with self._lock:
if number < len(self.cards):
return self.cards[number]
if number == len(self.cards):
max_cards_horiz = int(curses.COLS / 35)
self.cards.append(self._boxwin(6, 35, 7 + 6 * (number // max_cards_horiz), 35 * (number % max_cards_horiz), color_pair=3))
return self.cards[number]
raise RuntimeError("Card number too high")
def _erase_card(self, number):
'''Destroy cards with this or higher number.'''
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(6, 35, 7 + 6 * (number // max_cards_horiz), 35 * (number % max_cards_horiz))
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number]
def _run(self, stdscr):
'''Start the actual service monitor'''
with self._lock:
curses.use_default_colors()
curses.curs_set(False)
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_BLACK, -1)
curses.init_pair(3, curses.COLOR_GREEN, -1)
self._redraw_screen(stdscr)
try:
while not self.shutdown:
now = int(time.time())
with self._lock:
overview = self._node_status.copy()
cardnumber = 0
for host, status in overview.items():
age = now - int(status['last_seen'] / 1000)
with self._lock:
if age > 90:
del self._node_status[host]
else:
card = self._get_card(cardnumber)
card.erase()
card.move(0, 0)
card.addstr('Host: ', curses.color_pair(3))
card.addstr(host)
card.move(1, 0)
card.addstr('Service: ', curses.color_pair(3))
if 'service' in status and status['service']:
card.addstr(status['service'])
else:
card.addstr('---', curses.color_pair(2))
card.move(2, 0)
card.addstr('State: ', curses.color_pair(3))
if 'status' in status:
status_code = status['status']
state_string = CommonService.human_readable_state.get(status_code, str(status_code))
state_color = None
if status_code in (CommonService.SERVICE_STATUS_PROCESSING, CommonService.SERVICE_STATUS_TIMER):
state_color = curses.color_pair(3) + curses.A_BOLD
if status_code == CommonService.SERVICE_STATUS_IDLE:
state_color = curses.color_pair(2) + curses.A_BOLD
if status_code == CommonService.SERVICE_STATUS_ERROR:
state_color = curses.color_pair(1)
if state_color:
card.addstr(state_string, state_color)
else:
card.addstr(state_string)
card.move(3, 0)
if age >= 10:
card.addstr("last seen %d seconds ago" % age, curses.color_pair(1) + (0 if age < 60 else curses.A_BOLD))
card.noutrefresh()
cardnumber = cardnumber + 1
if cardnumber < len(self.cards):
with self._lock:
self._erase_card(cardnumber)
with self._lock:
curses.doupdate()
time.sleep(0.2)
except KeyboardInterrupt:
pass # User pressed CTRL+C
self._transport.disconnect()
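# Illustrative sketch (not part of the original module): _boxwin above draws a
# bordered outer window and returns a smaller inner window for content so the
# border is never overwritten. A minimal standalone version of that pattern;
# the demo function and its geometry are invented for the example.
if __name__ == '__main__': # pragma: no cover
    def _demo(stdscr):
        curses.curs_set(False)
        outer = curses.newwin(5, 40, 2, 2)
        outer.border()
        outer.addstr(0, 4, " demo box ")
        outer.noutrefresh()
        inner = curses.newwin(3, 38, 3, 3)   # one-cell margin inside the border
        inner.addstr(0, 0, "content goes here")
        inner.noutrefresh()
        curses.doupdate()
        stdscr.getkey()                      # wait for a key press, then exit
    curses.wrapper(_demo)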
| {
"repo_name": "xia2/workflows",
"path": "workflows/contrib/status_monitor.py",
"copies": "1",
"size": "7034",
"license": "bsd-3-clause",
"hash": 3147657323622373400,
"line_mean": 38.7401129944,
"line_max": 130,
"alpha_frac": 0.5978106341,
"autogenerated": false,
"ratio": 3.74746936600959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48452800001095897,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import dask.array as da
from dask.utils import ignoring
from dask.array.reductions import arg_aggregate
import numpy as np
def eq(a, b):
if isinstance(a, da.Array):
a = a.compute()
if isinstance(b, da.Array):
b = b.compute()
if isinstance(a, (np.generic, np.ndarray)):
return np.allclose(a, b)
else:
return a == b
def test_arg_reduction():
pairs = [([4, 3, 5], [10, 11, 12]),
([3, 5, 1], [1, 2, 3])]
result = arg_aggregate(np.min, np.argmin, (100, 100), pairs)
assert eq(result, np.array([101, 11, 103]))
def test_reductions():
x = np.random.random((20, 20))
a = da.from_array(x, blockshape=(7, 7))
assert eq(a.argmin(axis=1), x.argmin(axis=1))
assert eq(a.argmax(axis=0), x.argmax(axis=0))
# assert eq(a.argmin(), x.argmin())
def test_nan():
x = np.array([[1, np.nan, 3, 4],
[5, 6, 7, np.nan],
[9, 10, 11, 12]])
d = da.from_array(x, blockshape=(2, 2))
assert eq(np.nansum(x), da.nansum(d))
assert eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
assert eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
assert eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
assert eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
assert eq(np.nanvar(x), da.nanvar(d))
assert eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
assert eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
assert eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
with ignoring(AttributeError):
assert eq(np.nanprod(x), da.nanprod(d))
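# Illustrative sketch (not part of the original tests): the nan-skipping
# reductions exercised above, end to end. blockshape= matches this older
# dask API; newer dask releases spell the same argument chunks=.
if __name__ == '__main__':
    x = np.array([[1.0, np.nan], [3.0, 4.0]])
    d = da.from_array(x, blockshape=(1, 2))
    print(da.nansum(d).compute())   # 8.0, the NaN is ignored
    print(np.nansum(x))             # 8.0, matching numpy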
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/tests/test_reductions.py",
"copies": "1",
"size": "1663",
"license": "bsd-3-clause",
"hash": -8341213176583273000,
"line_mean": 30.9807692308,
"line_max": 67,
"alpha_frac": 0.5868911606,
"autogenerated": false,
"ratio": 2.7351973684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38220885290210527,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import dask
from dask.array.blaze import *
from dask.array.into import into
from into import discover, convert, into
from collections import Iterable
from toolz import concat
from operator import getitem
def eq(a, b):
if isinstance(a, Array):
a = a.compute()
if isinstance(b, Array):
b = b.compute()
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
nx = np.arange(600).reshape((20, 30))
dx = convert(Array, nx, blockshape=(4, 5))
sx = symbol('x', discover(dx))
ny = np.arange(600).reshape((30, 20))
dy = convert(Array, ny, blockshape=(5, 4))
sy = symbol('y', discover(dy))
na = np.arange(20)
da = convert(Array, na, blockshape=(4,))
sa = symbol('a', discover(da))
nb = np.arange(30).reshape((30, 1))
db = convert(Array, nb, blockshape=(5, 1))
sb = symbol('b', discover(db))
dask_ns = {sx: dx, sy: dy, sa: da, sb: db}
numpy_ns = {sx: nx, sy: ny, sa: na, sb: nb}
def test_compute():
for expr in [2*sx + 1,
sx.sum(axis=0), sx.mean(axis=0),
sx + sx, sx.T, sx.T + sy,
sx.dot(sy), sy.dot(sx),
sx.sum(),
sx - sx.sum(),
sx.dot(sx.T),
sx.sum(axis=1),
sy + sa,
sy + sb,
sx[3:17], sx[3:10, 10:25:2] + 1, sx[:5, 10],
sx[0, 0]
]:
result = compute(expr, dask_ns)
expected = compute(expr, numpy_ns)
assert isinstance(result, Array)
if expr.dshape.shape:
result2 = into(np.ndarray, result)
else:
result2 = into(float, result)
assert eq(result2, expected)
def test_elemwise_broadcasting():
arr = compute(sy + sb, dask_ns)
expected = [[(arr.name, i, j) for j in range(5)]
for i in range(6)]
assert arr._keys() == expected
def test_ragged_blockdims():
dsk = {('x', 0, 0): np.ones((2, 2)),
('x', 0, 1): np.ones((2, 3)),
('x', 1, 0): np.ones((5, 2)),
('x', 1, 1): np.ones((5, 3))}
a = Array(dsk, 'x', shape=(7, 5), blockdims=[(2, 5), (2, 3)])
s = symbol('s', '7 * 5 * int')
assert compute(s.sum(axis=0), a).blockdims == ((2, 3),)
assert compute(s.sum(axis=1), a).blockdims == ((2, 5),)
assert compute(s + 1, a).blockdims == a.blockdims
def test_slicing_with_singleton_dimensions():
arr = compute(sx[5:15, 12], dx)
x = dx.name
y = arr.name
assert arr.dask[(y, 0)] == (getitem, (x, 1, 2), (slice(1, 4, 1), 2))
assert arr.dask[(y, 1)] == (getitem, (x, 2, 2), (slice(None, None, None), 2))
assert arr.dask[(y, 2)] == (getitem, (x, 3, 2), (slice(0, 3, 1), 2))
assert all(len(k) == 2 for k in arr._keys())
def test_slicing_with_lists():
nx = np.arange(20).reshape((4, 5))
dx = convert(Array, nx, blockshape=(2, 2))
sx = symbol('x', discover(dx))
expr = sx[[2, 0, 3]]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
expr = sx[::2, [2, 0, 3]]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
expr = sx[1, [2, 0, 3]]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
expr = sx[[2, 0, 3], -2]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
expr = sx[:, :]
assert compute(expr, dx).dask == dx.dask
expr = sx[0]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
expr = sx[0, [3, 1, 4]]
assert eq(np.array(compute(expr, dx)), compute(expr, nx))
def test_slicing_on_boundary_lines():
nx = np.arange(100).reshape((10, 10))
dx = convert(Array, nx, blockshape=(3, 3))
sx = symbol('x', discover(dx))
expr = sx[0, [1, 3, 9, 3]]
result = compute(expr, dx)
assert eq(result, nx[0, [1, 3, 9, 3]])
def test_slicing_with_newaxis():
nx = np.arange(20).reshape((4, 5))
dx = convert(Array, nx, blockshape=(2, 2))
sx = symbol('x', discover(dx))
expr = sx[:, None, :]
result = compute(expr, dx)
assert result.shape == (4, 1, 5)
assert result.blockdims == ((2, 2), (1,), (2, 2, 1))
assert eq(np.array(result), compute(expr, nx))
expr = sx[None, [2, 1, 3], None, None, :, None]
result = compute(expr, dx)
assert result.shape == (1, 3, 1, 1, 5, 1)
assert result.blockdims == ((1,), (3,), (1,), (1,), (2, 2, 1), (1,))
assert eq(np.array(result), compute(expr, nx))
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/tests/test_blaze.py",
"copies": "1",
"size": "4473",
"license": "bsd-3-clause",
"hash": -4750765025605636000,
"line_mean": 28.2352941176,
"line_max": 81,
"alpha_frac": 0.5325285044,
"autogenerated": false,
"ratio": 2.898898250162022,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8928097874237328,
"avg_score": 0.0006657760649387253,
"num_lines": 153
} |
from __future__ import absolute_import, division, print_function
import dask
import dask.array as da
from dask.array.core import *
from dask.utils import raises
from toolz import merge
from operator import getitem, add, mul
inc = lambda x: x + 1
def test_getem():
assert getem('X', blockshape=(2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getitem, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getitem, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getitem, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getitem, 'X', (slice(0, 2), slice(3, 6)))}
def test_top():
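    # top() takes einsum-like index strings: building output 'z' over 'ij'
    # from input 'x' over 'ij' means each output block z[i, j] is a task on
    # the matching input block x[i, j]; indices that appear in the inputs but
    # not the output (like 'j' in the dotmany case below) turn into lists of
    # blocks along that dimension.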
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_rec_concatenate():
x = np.array([1, 2])
assert rec_concatenate([[x, x, x], [x, x, x]]).shape == (2, 6)
x = np.array([[1, 2]])
assert rec_concatenate([[x, x, x], [x, x, x]]).shape == (2, 6)
def eq(a, b):
if isinstance(a, Array):
a = a.compute(get=dask.get)
if isinstance(b, Array):
b = b.compute(get=dask.get)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', blockshape=(5, 5), shape=(20, 20))
geto = getem('o', blockshape=(5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(np.dot(x, o), rec_concatenate(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', blockshape=(5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(rec_concatenate(out), x.T + 1)
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
blockshape = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, blockshape=blockshape))
a = Array(dsk, name, shape, blockshape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.blockdims == ((100,) * 10, (100,) * 10)
def test_uneven_blockdims():
a = Array({}, 'x', shape=(10, 10), blockshape=(3, 3))
assert a.blockdims == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
shape = (100, 10)
blockshape = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, blockshape=blockshape))
a = Array(dsk, name, shape, blockshape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', (50, 60), blockshape=(10, 10))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), ())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), blockshape=(3, 3))
assert eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_stack():
a, b, c = [Array(getem(name, blockshape=(2, 3), shape=(4, 6)),
name, shape=(4, 6), blockshape=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
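    # Stacking three (4, 6) arrays along a new leading axis gives shape
    # (3, 4, 6); the new axis gets one block per input, so blockdims grow a
    # leading (1, 1, 1) in front of the original (2, 2) and (3, 3).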
assert s.shape == (3, 4, 6)
assert s.blockdims == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == ('A', 1, 0)
assert s.dask[(s.name, 2, 1, 0)] == ('C', 1, 0)
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.blockdims == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == ('B', 0, 0)
assert s2.dask[(s2.name, 1, 1, 0)] == ('B', 1, 0)
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.blockdims == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == ('A', 0, 1)
assert s2.dask[(s2.name, 1, 1, 2)] == ('C', 1, 1)
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).blockdims == \
stack([a, b, c], axis=2).blockdims
def test_concatenate():
a, b, c = [Array(getem(name, blockshape=(2, 3), shape=(4, 6)),
name, shape=(4, 6), blockshape=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
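    # Concatenating along axis 0 chains the row blockdims of the inputs:
    # three arrays with row blocks (2, 2) each give (2, 2, 2, 2, 2, 2).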
assert x.shape == (12, 6)
assert x.blockdims == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.blockdims == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).blockdims == \
concatenate([a, b, c], axis=1).blockdims
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_binops():
a = Array(dict((('a', i), '') for i in range(3)),
'a', blockdims=((10, 10, 10),))
b = Array(dict((('b', i), '') for i in range(3)),
'b', blockdims=((10, 10, 10),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert result.dask[('c', 0)][1] == ('a', 0)
f = result.dask[('c', 0)][0]
assert f(10) == 100
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, blockshape=(5,))
assert eq(a.sum()**2, x.sum()**2)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, blockshape=(5,))
b = from_array(y, blockshape=(5, 1))
c = a + 1
assert eq(c, x + 1)
c = a + b
assert eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert eq(c, np.exp(x))
assert eq(abs(-a), a)
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, blockshape=(1,))
assert eq(y['a'], x['a'])
assert eq(y[['b', 'a']], x[['b', 'a']])
def test_reductions():
x = np.arange(400).reshape((20, 20))
a = from_array(x, blockshape=(7, 7))
assert eq(a.sum(), x.sum())
assert eq(a.sum(axis=1), x.sum(axis=1))
assert eq(a.sum(axis=1, keepdims=True), x.sum(axis=1, keepdims=True))
assert eq(a.mean(), x.mean())
assert eq(a.var(axis=(1, 0)), x.var(axis=(1, 0)))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
assert eq(a.std(axis=0, keepdims=True), x.std(axis=0, keepdims=True))
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, blockshape=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, blockshape=(5, 5))
assert eq(tensordot(a, b, axes=1), np.tensordot(x, y, axes=1))
assert eq(tensordot(a, b, axes=(1, 0)), np.tensordot(x, y, axes=(1, 0)))
# assert (tensordot(a, a).blockdims
# == tensordot(a, a, axes=((1, 0), (0, 1))).blockdims)
# assert eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, blockshape=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, blockshape=(5, 5))
assert eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, blockshape=(5, 5))
assert eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, blockshape=(5, 5))
assert eq(b.vnorm(), np.linalg.norm(a))
assert eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, blockshape=(4, 5))
assert eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, blockshape=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, blockshape=(4,))
assert eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, blockshape=(4, 8))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_constant():
d = da.constant(2, blockdims=((2, 2), (3, 3)))
assert d.blockdims == ((2, 2), (3, 3))
assert (np.array(d)[:] == 2).all()
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, blockshape=(7, 7))
e = d.map_blocks(inc)
assert d.blockdims == e.blockdims
assert eq(e, x + 1)
d = from_array(x, blockshape=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], blockshape=(5, 5))
assert e.blockdims == ((5, 5), (5, 5))
assert eq(e, x[::2, ::2])
d = from_array(x, blockshape=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], blockdims=((4, 4, 2), (4, 4, 2)))
assert eq(e, x[::2, ::2])
def test_map_blocks_with_block_id():
    # map_blocks passes the block index to functions that accept a
    # ``block_id`` keyword argument.
x = np.arange(10)
d = from_array(x, blockshape=(2,))
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
d = d.map_blocks(func)
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
assert eq(d, expected)
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), blockshape=(2, 2))
assert eq(d, np.fromfunction(f, shape=(5, 5)))
def test_from_array_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), blockshape=(2, 2))
assert d.name in repr(d)
assert str(d.shape) in repr(d)
assert str(d.blockdims) in repr(d)
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, blockshape=((2, 2, 2, 2)))
assert eq(d[..., 1], x[..., 1])
assert eq(d[0, ..., 1], x[0, ..., 1])
def test_dtype():
d = da.ones((4, 4), blockshape=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/array/tests/test_array_core.py",
"copies": "1",
"size": "13349",
"license": "bsd-3-clause",
"hash": -8179767841774279000,
"line_mean": 29.2013574661,
"line_max": 86,
"alpha_frac": 0.4852048843,
"autogenerated": false,
"ratio": 2.634497730412473,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3619702614712473,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import dask
from dask.async import (start_state_from_dask, get_sync, finish_task, sortkey,
remote_exception)
from dask.order import order
from dask.utils_test import GetFunctionTestMixin, inc, add
fib_dask = {'f0': 0, 'f1': 1, 'f2': 1, 'f3': 2, 'f4': 3, 'f5': 5, 'f6': 8}
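# start_state_from_dask builds the scheduler's bookkeeping dict: 'cache' holds
# concrete values, 'dependencies'/'dependents' give the graph in both
# directions, 'ready' lists runnable keys, and 'waiting'/'waiting_data' track
# what each pending task is still blocked on; the expected dicts below spell
# this out for a small graph.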
def test_start_state():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
result = start_state_from_dask(dsk)
expected = {'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'released': set([]),
'running': set([]),
'ready': ['z'],
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
assert result == expected
def test_start_state_looks_at_cache():
dsk = {'b': (inc, 'a')}
cache = {'a': 1}
result = start_state_from_dask(dsk, cache)
assert result['dependencies']['b'] == set(['a'])
assert result['ready'] == ['b']
def test_start_state_with_redirects():
dsk = {'x': 1, 'y': 'x', 'z': (inc, 'y')}
result = start_state_from_dask(dsk)
assert result['cache'] == {'x': 1}
def test_start_state_with_independent_but_runnable_tasks():
assert start_state_from_dask({'x': (inc, 1)})['ready'] == ['x']
def test_start_state_with_tasks_no_deps():
dsk = {'a': [1, (inc, 2)],
'b': [1, 2, 3, 4],
'c': (inc, 3)}
state = start_state_from_dask(dsk)
assert list(state['cache'].keys()) == ['b']
assert 'a' in state['ready'] and 'c' in state['ready']
deps = dict((k, set()) for k in 'abc')
assert state['dependencies'] == deps
assert state['dependents'] == deps
def test_finish_task():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
sortkey = order(dsk).get
state = start_state_from_dask(dsk)
state['ready'].remove('z')
state['running'] = set(['z', 'other-task'])
task = 'z'
result = 2
state['cache']['z'] = result
finish_task(dsk, task, state, set(), sortkey)
assert state == {
'cache': {'y': 2, 'z': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'finished': set(['z']),
'released': set(['x']),
'running': set(['other-task']),
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'ready': ['w'],
'waiting': {},
'waiting_data': {'y': set(['w']),
'z': set(['w'])}}
class TestGetAsync(GetFunctionTestMixin):
get = staticmethod(get_sync)
def test_get_sync_num_workers(self):
self.get({'x': (inc, 'y'), 'y': 1}, 'x', num_workers=2)
def test_cache_options():
try:
from chest import Chest
except ImportError:
return
cache = Chest()
def inc2(x):
assert 'y' in cache
return x + 1
with dask.set_options(cache=cache):
get_sync({'x': (inc2, 'y'), 'y': 1}, 'x')
def test_sort_key():
L = ['x', ('x', 1), ('z', 0), ('x', 0)]
assert sorted(L, key=sortkey) == ['x', ('x', 0), ('x', 1), ('z', 0)]
def test_callback():
f = lambda x: x + 1
dsk = {'a': (f, 1)}
from dask.threaded import get
def start_callback(key, d, state):
assert key == 'a' or key is None
assert d == dsk
assert isinstance(state, dict)
def end_callback(key, value, d, state, worker_id):
assert key == 'a' or key is None
assert value == 2 or value is None
assert d == dsk
assert isinstance(state, dict)
get(dsk, 'a', start_callback=start_callback, end_callback=end_callback)
def test_order_of_startstate():
dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b'),
'x': 1, 'y': (inc, 'x')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['y', 'b']
dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y'),
'a': 1, 'b': (inc, 'a')}
result = start_state_from_dask(dsk)
assert result['ready'] == ['b', 'y']
def test_nonstandard_exceptions_propagate():
class MyException(Exception):
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return "My Exception!"
def f():
raise MyException(1, 2)
from dask.threaded import get
try:
get({'x': (f,)}, 'x')
assert False
except MyException as e:
assert "My Exception!" in str(e)
assert "Traceback" in str(e)
assert 'a' in dir(e)
assert 'traceback' in dir(e)
assert e.exception.a == 1 and e.exception.b == 2
assert e.a == 1 and e.b == 2
def test_remote_exception():
e = TypeError("hello")
a = remote_exception(e, 'traceback')
b = remote_exception(e, 'traceback')
assert type(a) == type(b)
assert isinstance(a, TypeError)
assert 'hello' in str(a)
assert 'traceback' in str(a)
def test_ordering():
L = []
def append(i):
L.append(i)
dsk = {('x', i): (append, i) for i in range(10)}
x_keys = sorted(dsk)
dsk['y'] = (lambda *args: None, list(x_keys))
get_sync(dsk, 'y')
assert L == sorted(L)
| {
"repo_name": "cowlicks/dask",
"path": "dask/tests/test_async.py",
"copies": "2",
"size": "5897",
"license": "bsd-3-clause",
"hash": -4677153238792300000,
"line_mean": 27.6262135922,
"line_max": 78,
"alpha_frac": 0.4612514838,
"autogenerated": false,
"ratio": 3.287068004459309,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4748319488259309,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from ..catalog.blaze_url import add_indexers_to_url
from .data_descriptor import DDesc, Capabilities
from dynd import nd, ndt
class Remote_DDesc(DDesc):
"""
A Blaze data descriptor which exposes an array on another
server.
"""
def __init__(self, url, dshape=None):
from ..io.client import requests
self.url = url
if dshape is None:
self._dshape = datashape.dshape(requests.get_remote_datashape(url))
else:
self._dshape = datashape.dshape(dshape)
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the remote data descriptor."""
return Capabilities(
# treat remote arrays as immutable (maybe not?)
immutable = True,
# TODO: not sure what to say here
deferred = False,
# persistent on the remote server
persistent = True,
appendable = False,
remote = True,
)
def __repr__(self):
return 'Remote_DDesc(%r, dshape=%r)' % (self.url, self.dshape)
def dynd_arr(self):
        """Downloads the data and returns a local in-memory nd.array"""
        from ..io.client import requests
# TODO: Need binary serialization
j = requests.get_remote_json(self.url)
tp = ndt.type(str(self.dshape))
return nd.parse_json(tp, j)
def __len__(self):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Fixed):
return int(ds)
raise AttributeError('the datashape (%s) of this data descriptor has no length' % ds)
def __getitem__(self, key):
return Remote_DDesc(add_indexers_to_url(self.url, (key,)))
def getattr(self, name):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Record) and name in ds.names:
return Remote_DDesc(self.url + '.' + name)
else:
raise AttributeError(('Blaze remote array does not ' +
'have attribute "%s"') % name)
def __iter__(self):
raise NotImplementedError('remote data descriptor iterator unimplemented')
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/datadescriptor/remote_data_descriptor.py",
"copies": "3",
"size": "2419",
"license": "bsd-3-clause",
"hash": 4032346576026830300,
"line_mean": 31.6891891892,
"line_max": 93,
"alpha_frac": 0.5874328235,
"autogenerated": false,
"ratio": 4.01827242524917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.610570524874917,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from ..catalog.blaze_url import add_indexers_to_url
from .data_descriptor import IDataDescriptor, Capabilities
from dynd import nd, ndt
class RemoteDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes an array on another
server.
"""
def __init__(self, url, dshape=None):
from ..io.client import requests
self.url = url
if dshape is None:
self._dshape = datashape.dshape(requests.get_remote_datashape(url))
else:
self._dshape = datashape.dshape(dshape)
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the remote data descriptor."""
return Capabilities(
# treat remote arrays as immutable (maybe not?)
immutable = True,
# TODO: not sure what to say here
deferred = False,
# persistent on the remote server
persistent = True,
appendable = False,
remote = True,
)
def __repr__(self):
return 'RemoteDataDescriptor(%r, dshape=%r)' % (self.url, self.dshape)
def dynd_arr(self):
        """Downloads the data and returns a local in-memory nd.array"""
        from ..io.client import requests
# TODO: Need binary serialization
j = requests.get_remote_json(self.url)
tp = ndt.type(str(self.dshape))
return nd.parse_json(tp, j)
def __len__(self):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Fixed):
return int(ds)
raise AttributeError('the datashape (%s) of this data descriptor has no length' % ds)
def __getitem__(self, key):
return RemoteDataDescriptor(add_indexers_to_url(self.url, (key,)))
def getattr(self, name):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Record) and name in ds.names:
return RemoteDataDescriptor(self.url + '.' + name)
else:
raise AttributeError(('Blaze remote array does not ' +
'have attribute "%s"') % name)
def __iter__(self):
raise NotImplementedError('remote data descriptor iterator unimplemented')
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/datadescriptor/remote_data_descriptor.py",
"copies": "7",
"size": "2470",
"license": "bsd-3-clause",
"hash": 2374037271377915000,
"line_mean": 32.8356164384,
"line_max": 93,
"alpha_frac": 0.5979757085,
"autogenerated": false,
"ratio": 4.116666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005196332182264554,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import (discover, Tuple, Record, dshape, Fixed, DataShape,
to_numpy_dtype, isdimension, var)
from datashape.predicates import iscollection, isscalar, isrecord
from pandas import DataFrame, Series
import itertools
import numpy as np
from dynd import nd
import warnings
from ..expr import Expr, Symbol
from ..dispatch import dispatch
from .into import into
from ..compatibility import _strtypes, unicode
from ..resource import resource
__all__ = ['Data', 'Table', 'into', 'to_html']
names = ('_%d' % i for i in itertools.count(1))
class Data(Symbol):
""" Interactive data
The ``Data`` object presents a familiar view onto a variety of forms of
    data. This user-level object provides an interactive experience for
    working with Blaze's abstract expressions.
Parameters
----------
data: anything
Any type with ``discover`` and ``compute`` implementations
fields: list of strings - optional
Field or column names, will be inferred from datasource if possible
dshape: string or DataShape - optional
Datashape describing input data
name: string - optional
A name for the table
Examples
--------
>>> t = Data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name
name
0 Bob
1 Edith
"""
__slots__ = 'data', 'dshape', '_name'
def __init__(self, data, dshape=None, name=None, fields=None, columns=None,
schema=None, **kwargs):
if isinstance(data, _strtypes):
data = resource(data, schema=schema, dshape=dshape,
columns=columns, **kwargs)
if columns:
warnings.warn("columns kwarg deprecated. Use fields instead",
DeprecationWarning)
if columns and not fields:
fields = columns
if schema and dshape:
raise ValueError("Please specify one of schema= or dshape= keyword"
" arguments")
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if not dshape:
dshape = discover(data)
types = None
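        # When explicit ``fields`` are given, rebuild the measure as a Record
        # pairing each field name with the corresponding inferred type,
        # whether the discovered measure was a Tuple, a scalar, or already a
        # Record.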
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isrecord(dshape.measure) and fields:
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
self.dshape = datashape.dshape(dshape)
self.data = data
if (hasattr(data, 'schema')
and isinstance(data.schema, (DataShape, str, unicode))
and self.schema != data.schema):
raise TypeError('%s schema %s does not match %s schema %s' %
(type(data).__name__, data.schema,
type(self).__name__, self.schema))
self._name = name or next(names)
def _resources(self):
return {self: self.data}
@property
def _args(self):
return (id(self.data), self.dshape, self._name)
def __setstate__(self, state):
for slot, arg in zip(self.__slots__, state):
setattr(self, slot, arg)
def Table(*args, **kwargs):
""" Deprecated, see Data instead """
warnings.warn("Table is deprecated, use Data instead",
DeprecationWarning)
return Data(*args, **kwargs)
@dispatch(Data, dict)
def _subs(o, d):
return o
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if iscollection(expr.dshape):
head = expr.head(n + 1)
result = compute(head)
if not len(result):
return DataFrame(columns=expr.fields)
if iscollection(expr.dshape):
return into(DataFrame(columns=expr.fields), result)
else:
return compute(expr)
def expr_repr(expr, n=10):
if not expr._resources():
return str(expr)
result = concrete_head(expr, n)
if isinstance(result, (DataFrame, Series)):
s = repr(result)
if len(result) > 10:
result = result[:10]
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return repr(result) # pragma: no cover
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(type, Expr)
def into(a, b, **kwargs):
f = into.dispatch(a, type(b))
return f(a, b, **kwargs)
@dispatch(object, Expr)
def into(a, b, **kwargs):
return into(a, compute(b), dshape=kwargs.pop('dshape', b.dshape),
schema=b.schema, **kwargs)
@dispatch(DataFrame, Expr)
def into(a, b, **kwargs):
return into(DataFrame(columns=b.fields), compute(b))
@dispatch(nd.array, Expr)
def into(a, b, **kwargs):
return into(nd.array(), compute(b), dtype=str(b.schema))
@dispatch(np.ndarray, Expr)
def into(a, b, **kwargs):
schema = dshape(str(b.schema).replace('?', ''))
return into(np.ndarray(0), compute(b), dtype=to_numpy_dtype(schema))
def table_length(expr):
try:
return expr._len()
except TypeError:
return compute(expr.count())
Expr.__repr__ = expr_repr
Expr._repr_html_ = lambda self: to_html(self)
Expr.__len__ = table_length
| {
"repo_name": "vitan/blaze",
"path": "blaze/api/interactive.py",
"copies": "1",
"size": "6383",
"license": "bsd-3-clause",
"hash": 9000879130301823000,
"line_mean": 27.8823529412,
"line_max": 79,
"alpha_frac": 0.5874980417,
"autogenerated": false,
"ratio": 3.8244457759137207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904914002637506,
"avg_score": 0.0014059629952430568,
"num_lines": 221
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import (dshape, DataShape, Record, isdimension, Option,
discover, Tuple)
from .dispatch import dispatch
from .expr import Expr
from .compatibility import _strtypes
__all__ = []
try:
import pyspark
from pyspark import sql, RDD
from pyspark.sql import (IntegerType, FloatType, StringType, TimestampType,
StructType, StructField, ArrayType, SchemaRDD, SQLContext,
ShortType, DoubleType, BooleanType, LongType)
from pyspark import SparkContext
except ImportError:
pyspark = None
def deoption(ds):
"""
>>> deoption('int32')
ctype("int32")
>>> deoption('?int32')
ctype("int32")
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and not isdimension(ds[0]):
return deoption(ds[0])
if isinstance(ds, Option):
return ds.ty
else:
return ds
if pyspark:
if not issubclass(SQLContext, object):
raise ImportError("This version of SparkSQL uses old-style classes. "
"Please update to newer version of Spark")
types = {datashape.int16: ShortType(),
datashape.int32: IntegerType(),
datashape.int64: IntegerType(),
datashape.float32: FloatType(),
datashape.float64: DoubleType(),
datashape.real: DoubleType(),
datashape.time_: TimestampType(),
datashape.date_: TimestampType(),
datashape.datetime_: TimestampType(),
datashape.bool_: BooleanType(),
datashape.string: StringType()}
rev_types = {IntegerType(): datashape.int64,
ShortType(): datashape.int32,
LongType(): datashape.int64,
FloatType(): datashape.float32,
DoubleType(): datashape.float64,
StringType(): datashape.string,
TimestampType(): datashape.datetime_,
BooleanType(): datashape.bool_}
def sparksql_to_ds(ss):
""" Convert datashape to SparkSQL type system
>>> sparksql_to_ds(IntegerType()) # doctest: +SKIP
ctype("int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), False)) # doctest: +SKIP
dshape("var * int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), True)) # doctest: +SKIP
dshape("var * ?int64")
>>> sparksql_to_ds(StructType([ # doctest: +SKIP
... StructField('name', StringType(), False),
... StructField('amount', IntegerType(), True)]))
dshape("{ name : string, amount : ?int64 }")
"""
if ss in rev_types:
return rev_types[ss]
if isinstance(ss, ArrayType):
elem = sparksql_to_ds(ss.elementType)
if ss.containsNull:
return datashape.var * Option(elem)
else:
return datashape.var * elem
if isinstance(ss, StructType):
return dshape(Record([[field.name, Option(sparksql_to_ds(field.dataType))
if field.nullable
else sparksql_to_ds(field.dataType)]
for field in ss.fields]))
raise NotImplementedError("SparkSQL type not known %s" % ss)
def ds_to_sparksql(ds):
""" Convert datashape to SparkSQL type system
>>> print(ds_to_sparksql('int32')) # doctest: +SKIP
IntegerType
>>> print(ds_to_sparksql('5 * int32')) # doctest: +SKIP
ArrayType(IntegerType,false)
>>> print(ds_to_sparksql('5 * ?int32')) # doctest: +SKIP
ArrayType(IntegerType,true)
>>> print(ds_to_sparksql('{name: string, amount: int32}')) # doctest: +SKIP
StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,false)))
>>> print(ds_to_sparksql('10 * {name: string, amount: ?int32}')) # doctest: +SKIP
ArrayType(StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,true))),false)
"""
if isinstance(ds, str):
return ds_to_sparksql(dshape(ds))
if isinstance(ds, Record):
return sql.StructType([
sql.StructField(name,
ds_to_sparksql(deoption(typ)),
isinstance(typ, datashape.Option))
for name, typ in ds.fields])
if isinstance(ds, DataShape):
if isdimension(ds[0]):
elem = ds.subshape[0]
if isinstance(elem, DataShape) and len(elem) == 1:
elem = elem[0]
return sql.ArrayType(ds_to_sparksql(deoption(elem)),
isinstance(elem, Option))
else:
return ds_to_sparksql(ds[0])
if ds in types:
return types[ds]
raise NotImplementedError()
@dispatch(SQLContext, RDD)
def into(sqlContext, rdd, schema=None, columns=None, **kwargs):
""" Convert a normal PySpark RDD to a SparkSQL RDD
        The schema is discovered from the RDD and converted with
        ds_to_sparksql; it can also be given explicitly via the ``schema``
        keyword argument.
"""
schema = schema or discover(rdd).subshape[0]
if isinstance(schema[0], Tuple):
columns = columns or list(range(len(schema[0].dshapes)))
types = schema[0].dshapes
schema = dshape(Record(list(zip(columns, types))))
sql_schema = ds_to_sparksql(schema)
return sqlContext.applySchema(rdd, sql_schema)
@dispatch(SQLContext, (Expr, object) + _strtypes)
def into(sqlContext, o, **kwargs):
schema = kwargs.pop('schema', None) or discover(o).subshape[0]
return into(sqlContext, into(sqlContext._sc, o), schema=schema, **kwargs)
@dispatch((tuple, list, set), SchemaRDD)
def into(a, b, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(map(tuple, b.collect()))
@dispatch(SchemaRDD)
def discover(srdd):
return datashape.var * sparksql_to_ds(srdd.schema())
| {
"repo_name": "vitan/blaze",
"path": "blaze/sparksql.py",
"copies": "1",
"size": "6243",
"license": "bsd-3-clause",
"hash": -5786337599849421000,
"line_mean": 34.6742857143,
"line_max": 114,
"alpha_frac": 0.5720006407,
"autogenerated": false,
"ratio": 4.032945736434108,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5104946377134107,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import Record, DataShape, dshape, TimeDelta, Decimal, Option
from datashape import coretypes as ct
from datashape.predicates import iscollection, isboolean, isnumeric, isdatelike
from numpy import inf
from odo.utils import copydoc
import toolz
from .core import common_subexpression
from .expressions import Expr, ndim, dshape_method_list, method_properties
from .strings import isstring
def _normalize_axis(axis, child):
if axis is None:
axis = tuple(range(child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return tuple(sorted(axis))
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
_arguments = '_child', 'axis', 'keepdims'
def __new__(cls, _child, axis=None, keepdims=False):
axis = _normalize_axis(axis, _child)
return super(Reduction, cls).__new__(cls, _child, axis, keepdims)
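    # _dshape drops the reduced axes from the child's shape, or collapses
    # them to length 1 when keepdims=True; e.g. reducing a (10, 20) child
    # over axis=(0,) gives shape (20,), or (1, 20) with keepdims.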
def _dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
def _schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
child_name = self._child._name
if child_name is None or child_name == '_':
return type(self).__name__
else:
return '%s_%s' % (child_name, type(self).__name__)
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(
set(self._arguments[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
def _schema(self):
return DataShape(datashape.maxtype(super(sum, self)._schema()))
class max(Reduction):
pass
class min(Reduction):
pass
class FloatingReduction(Reduction):
def _schema(self):
measure = self._child.schema.measure
base = getattr(measure, 'ty', measure)
return_type = Option if isinstance(measure, Option) else toolz.identity
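        # Decimal and TimeDelta measures keep their type; everything else is
        # promoted to float64, and the result is re-wrapped in Option when
        # the input measure was optional.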
return DataShape(return_type(
base
if isinstance(base, Decimal) else
base
if isinstance(base, TimeDelta) else
ct.float64,
))
class mean(FloatingReduction):
pass
class var(FloatingReduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
_arguments = '_child', 'unbiased', 'axis', 'keepdims'
def __new__(cls, child, unbiased=False, axis=None, keepdims=False):
axis = _normalize_axis(axis, child)
return super(Reduction, cls).__new__(
cls, child, unbiased, axis, keepdims,
)
class std(FloatingReduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
_arguments = '_child', 'unbiased', 'axis', 'keepdims'
def __new__(cls, child, unbiased=False, axis=None, keepdims=False):
axis = _normalize_axis(axis, child)
return super(Reduction, cls).__new__(
cls, child, unbiased, axis, keepdims,
)
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
---------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
_arguments = '_child', 'names', 'values', 'axis', 'keepdims'
def __new__(cls, _child, names, values, axis=None, keepdims=False):
return super(Summary, cls).__new__(
cls, _child, names, values, axis, keepdims,
)
def _dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
@copydoc(Summary)
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
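    # If a single reduction over a non-collection expression was passed, walk
    # back up its inputs until a collection is found so the Summary gets a
    # tabular child to attach to.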
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
return sum(abs(expr) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
dshape_method_list.extend([
(iscollection, set([count, nelements])),
(lambda ds: (iscollection(ds) and
(isstring(ds) or isnumeric(ds) or isboolean(ds) or
isdatelike(ds))),
set([min, max])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all])),
(lambda ds: iscollection(ds) and (isnumeric(ds) or isboolean(ds)),
set([mean, sum, std, var, vnorm])),
])
method_properties.update([nrows])
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/reductions.py",
"copies": "3",
"size": "9312",
"license": "bsd-3-clause",
"hash": 7065385435187259000,
"line_mean": 27.1329305136,
"line_max": 80,
"alpha_frac": 0.5678694158,
"autogenerated": false,
"ratio": 3.783827712312068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5851697128112068,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import Record, DataShape, dshape, TimeDelta
from datashape import coretypes as ct
from datashape.predicates import iscollection, isboolean, isnumeric, isdatelike
from numpy import inf
from odo.utils import copydoc
import toolz
from .core import common_subexpression
from .expressions import Expr, ndim
from .strings import isstring
from .expressions import dshape_method_list, method_properties
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_hash', '_child', 'axis', 'keepdims'
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
@property
def schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
child_name = self._child._name
if child_name is None or child_name == '_':
return type(self).__name__
else:
return '%s_%s' % (child_name, type(self).__name__)
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(
set(self.__slots__[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
@property
def schema(self):
return DataShape(datashape.maxtype(super(sum, self).schema))
class max(Reduction):
pass
class min(Reduction):
pass
class mean(Reduction):
schema = dshape(ct.real)
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(var, self).__init__(child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(std, self).__init__(child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
---------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_hash', '_child', 'names', 'values', 'axis', 'keepdims'
def __init__(self, _child, names, values, axis=None, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
self.axis = axis
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
@copydoc(Summary)
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
return sum(abs(expr) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
dshape_method_list.extend([
(iscollection, set([count, nelements])),
(lambda ds: (iscollection(ds) and
(isstring(ds) or isnumeric(ds) or isboolean(ds) or
isdatelike(ds) or isinstance(ds, TimeDelta))),
set([min, max])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all])),
(lambda ds: iscollection(ds) and (isnumeric(ds) or isboolean(ds)),
set([mean, sum, std, var, vnorm])),
])
method_properties.update([nrows])
| {
"repo_name": "jdmcbr/blaze",
"path": "blaze/expr/reductions.py",
"copies": "10",
"size": "8915",
"license": "bsd-3-clause",
"hash": 8618265578898406000,
"line_mean": 26.600619195,
"line_max": 80,
"alpha_frac": 0.5623107123,
"autogenerated": false,
"ratio": 3.742653232577666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9304963944877667,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import String, DataShape, Option, bool_
from odo.utils import copydoc
from .expressions import schema_method_list, ElemWise
from .arithmetic import Interp, Repeat, _mkbin, repeat, interp, _add, _radd
from ..compatibility import basestring
__all__ = ['Like', 'like', 'strlen', 'UnaryStringFunction']
class Like(ElemWise):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = t[t.name.like('Alice*')]
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
__slots__ = '_hash', '_child', 'pattern'
def _dshape(self):
shape, schema = self._child.dshape.shape, self._child.schema
schema = Option(bool_) if isinstance(schema.measure, Option) else bool_
return DataShape(*(shape + (schema,)))
@copydoc(Like)
def like(child, pattern):
if not isinstance(pattern, basestring):
raise TypeError('pattern argument must be a string')
return Like(child, pattern)
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
__slots__ = '_hash', '_child'
class strlen(UnaryStringFunction):
schema = datashape.int64
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
_mod, _rmod = _mkbin('mod', Interp)
_mul, _rmul = _mkbin('mul', Repeat)
schema_method_list.extend([
(
isstring,
set([
_add, _radd, _mod, _rmod, _mul, _rmul, repeat, interp, like, strlen
])
)
])
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/expr/strings.py",
"copies": "2",
"size": "1851",
"license": "bsd-3-clause",
"hash": 9044334406268298000,
"line_mean": 25.4428571429,
"line_max": 79,
"alpha_frac": 0.622906537,
"autogenerated": false,
"ratio": 3.5190114068441063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 70
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import String
from datashape.predicates import isrecord, iscollection
from .expressions import Expr, schema_method_list, ElemWise
__all__ = ['Like', 'like', 'strlen', 'UnaryStringFunction']
class Like(Expr):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = like(t, name='Alice*')
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
__slots__ = '_hash', '_child', '_patterns'
@property
def patterns(self):
return dict(self._patterns)
@property
def dshape(self):
return datashape.var * self._child.dshape.subshape[0]
def like(child, **kwargs):
return Like(child, tuple(kwargs.items()))
like.__doc__ = Like.__doc__
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
__slots__ = '_hash', '_child'
class strlen(UnaryStringFunction):
schema = datashape.int64
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
schema_method_list.extend([
(lambda ds: isstring(ds) or (isrecord(ds.measure) and
any(isinstance(getattr(typ, 'ty', typ),
String)
for typ in ds.measure.types)),
set([like])),
(lambda ds: isinstance(getattr(ds.measure, 'ty', ds.measure), String),
set([strlen]))
])
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/strings.py",
"copies": "1",
"size": "1774",
"license": "bsd-3-clause",
"hash": -2493880716499492400,
"line_mean": 26.2923076923,
"line_max": 74,
"alpha_frac": 0.5868094701,
"autogenerated": false,
"ratio": 3.7584745762711864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9842536793623933,
"avg_score": 0.0005494505494505495,
"num_lines": 65
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import String
from datashape.predicates import isrecord
from .expressions import Expr, schema_method_list, ElemWise
from .arithmetic import Interp, Repeat, _mkbin, repeat, interp, _add, _radd
__all__ = ['Like', 'like', 'strlen', 'UnaryStringFunction']
class Like(Expr):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = like(t, name='Alice*')
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
__slots__ = '_hash', '_child', '_patterns'
@property
def patterns(self):
return dict(self._patterns)
@property
def dshape(self):
return datashape.var * self._child.dshape.subshape[0]
def like(child, **kwargs):
return Like(child, tuple(kwargs.items()))
like.__doc__ = Like.__doc__
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
__slots__ = '_hash', '_child'
class strlen(UnaryStringFunction):
schema = datashape.int64
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
_mod, _rmod = _mkbin('mod', Interp)
_mul, _rmul = _mkbin('mul', Repeat)
schema_method_list.extend([
(isstring, set([_add, _radd, _mod, _rmod, _mul, _rmul, repeat, interp])),
(lambda ds: isstring(ds) or (isrecord(ds.measure) and
any(isinstance(getattr(typ, 'ty', typ),
String)
for typ in ds.measure.types)),
set([like])),
(lambda ds: isinstance(getattr(ds.measure, 'ty', ds.measure), String),
set([strlen]))
])
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/strings.py",
"copies": "1",
"size": "1988",
"license": "bsd-3-clause",
"hash": -4918247206733027000,
"line_mean": 27,
"line_max": 77,
"alpha_frac": 0.5890342052,
"autogenerated": false,
"ratio": 3.5627240143369177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4651758219536918,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import String
from datashape.predicates import isrecord
from odo.utils import copydoc
from .expressions import Expr, schema_method_list, ElemWise
from .arithmetic import Interp, Repeat, _mkbin, repeat, interp, _add, _radd
__all__ = ['Like', 'like', 'strlen', 'UnaryStringFunction']
class Like(Expr):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = like(t, name='Alice*')
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
__slots__ = '_hash', '_child', '_patterns'
@property
def patterns(self):
return dict(self._patterns)
@property
def dshape(self):
return datashape.var * self._child.dshape.subshape[0]
@copydoc(Like)
def like(child, **kwargs):
return Like(child, tuple(kwargs.items()))
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
__slots__ = '_hash', '_child'
class strlen(UnaryStringFunction):
schema = datashape.int64
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
_mod, _rmod = _mkbin('mod', Interp)
_mul, _rmul = _mkbin('mul', Repeat)
schema_method_list.extend([
(isstring, set([_add, _radd, _mod, _rmod, _mul, _rmul, repeat, interp])),
(lambda ds: isstring(ds) or (isrecord(ds.measure) and
any(isinstance(getattr(typ, 'ty', typ),
String)
for typ in ds.measure.types)),
set([like])),
(lambda ds: isinstance(getattr(ds.measure, 'ty', ds.measure), String),
set([strlen]))
])
| {
"repo_name": "jcrist/blaze",
"path": "blaze/expr/strings.py",
"copies": "10",
"size": "2006",
"license": "bsd-3-clause",
"hash": 3489335385711116300,
"line_mean": 26.4794520548,
"line_max": 77,
"alpha_frac": 0.5947158524,
"autogenerated": false,
"ratio": 3.550442477876106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9145158330276105,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
import datetime
from datashape import discover, Tuple, Record, DataShape, var
from datashape.predicates import iscollection, isscalar, isrecord
from pandas import DataFrame, Series
import itertools
from functools import reduce
import numpy as np
import warnings
from collections import Iterator
from .expr import Expr, Symbol, ndim
from .utils import ignoring
from .dispatch import dispatch
from odo import into, resource
from .compatibility import _strtypes
__all__ = ['Data', 'Table', 'into', 'to_html']
names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
try:
import bcolz
not_an_iterator.append(bcolz.carray)
except ImportError:
pass
try:
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
except ImportError:
pass
def Data(data, dshape=None, name=None, fields=None, columns=None, schema=None,
**kwargs):
sub_uri = ''
if isinstance(data, _strtypes):
if '::' in data:
data, sub_uri = data.split('::')
data = resource(data, schema=schema, dshape=dshape, columns=columns,
**kwargs)
if (isinstance(data, Iterator) and
not isinstance(data, tuple(not_an_iterator))):
data = tuple(data)
if columns:
warnings.warn("columns kwarg deprecated. Use fields instead",
DeprecationWarning)
if columns and not fields:
fields = columns
if schema and dshape:
raise ValueError("Please specify one of schema= or dshape= keyword"
" arguments")
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if not dshape:
dshape = discover(data)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
result = InteractiveSymbol(data, ds, name)
if sub_uri:
for field in sub_uri.split('/'):
if field:
result = result[field]
return result
class InteractiveSymbol(Symbol):
""" Interactive data
The ``Data`` object presents a familiar view onto a variety of forms of
    data. This user-level object provides an interactive experience for using
Blaze's abstract expressions.
Parameters
----------
data: anything
Any type with ``discover`` and ``compute`` implementations
fields: list of strings - optional
Field or column names, will be inferred from datasource if possible
dshape: string or DataShape - optional
Datashape describing input data
name: string - optional
A name for the table
Examples
--------
>>> t = Data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name
name
0 Bob
1 Edith
"""
__slots__ = 'data', 'dshape', '_name'
def __init__(self, data, dshape, name=None):
self.data = data
self.dshape = dshape
self._name = name or (next(names)
if isrecord(dshape.measure)
else None)
def _resources(self):
return {self: self.data}
@property
def _args(self):
return (id(self.data), self.dshape, self._name)
def __setstate__(self, state):
for slot, arg in zip(self.__slots__, state):
setattr(self, slot, arg)
def Table(*args, **kwargs):
""" Deprecated, see Data instead """
warnings.warn("Table is deprecated, use Data instead",
DeprecationWarning)
return Data(*args, **kwargs)
@dispatch(InteractiveSymbol, dict)
def _subs(o, d):
return o
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if not iscollection(expr.dshape):
return compute(expr)
head = expr.head(n + 1)
if not iscollection(expr.dshape):
return into(object, head)
elif isrecord(expr.dshape.measure):
return into(DataFrame, head)
else:
df = into(DataFrame, head)
df.columns = [expr._name]
return df
result = compute(head)
if len(result) == 0:
return DataFrame(columns=expr.fields)
if isrecord(expr.dshape.measure):
return into(DataFrame, result, dshape=expr.dshape)
else:
df = into(DataFrame, result, dshape=expr.dshape)
df.columns = [expr._name]
return df
def repr_tables(expr, n=10):
result = concrete_head(expr, n).rename(columns={None: ''})
if isinstance(result, (DataFrame, Series)):
s = repr(result)
if len(result) > 10:
result = result[:10]
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return repr(result) # pragma: no cover
def numel(shape):
if var in shape:
return None
if not shape:
return 1
return reduce(lambda x, y: x * y, shape, 1)
def short_dshape(ds, nlines=5):
s = datashape.coretypes.pprint(ds)
lines = s.split('\n')
if len(lines) > 5:
s = '\n'.join(lines[:nlines]) + '\n ...'
return s
def coerce_to(typ, x):
try:
return typ(x)
    except Exception:
return into(typ, x)
def coerce_scalar(result, dshape):
if 'float' in dshape:
return coerce_to(float, result)
elif 'int' in dshape:
return coerce_to(int, result)
elif 'bool' in dshape:
return coerce_to(bool, result)
elif 'datetime' in dshape:
return coerce_to(datetime.datetime, result)
elif 'date' in dshape:
return coerce_to(datetime.date, result)
else:
return result
def expr_repr(expr, n=10):
# Pure Expressions, not interactive
if not expr._resources():
return str(expr)
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return repr(coerce_scalar(compute(expr), str(expr.dshape)))
# Tables
with ignoring(Exception):
if ndim(expr) == 1:
return repr_tables(expr, 10)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return repr(compute(expr))
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
s = 'Data: %s' % dat
if not isinstance(expr, Symbol):
s += '\nExpr: %s' % str(expr)
s += '\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)
return s
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
# Tables
if not expr._resources() or ndim(expr) != 1:
return to_html(repr(expr))
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(_strtypes)
def to_html(o):
return o.replace('\n', '<br>')
@dispatch((object, type, str), Expr)
def into(a, b, **kwargs):
result = compute(b, **kwargs)
kwargs['dshape'] = b.dshape
return into(a, result, **kwargs)
def table_length(expr):
try:
return expr._len()
except ValueError:
return compute(expr.count())
Expr.__repr__ = expr_repr
Expr._repr_html_ = lambda x: to_html(x)
Expr.__len__ = table_length
def intonumpy(data, dtype=None, **kwargs):
# TODO: Don't ignore other kwargs like copy
result = into(np.ndarray, data)
if dtype and result.dtype != dtype:
result = result.astype(dtype)
return result
def convert_base(typ, x):
x = compute(x)
try:
return typ(x)
    except Exception:
return typ(into(typ, x))
Expr.__array__ = intonumpy
Expr.__int__ = lambda x: convert_base(int, x)
Expr.__float__ = lambda x: convert_base(float, x)
Expr.__complex__ = lambda x: convert_base(complex, x)
Expr.__bool__ = lambda x: convert_base(bool, x)
Expr.__nonzero__ = lambda x: convert_base(bool, x)
Expr.__iter__ = into(Iterator)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/interactive.py",
"copies": "1",
"size": "9027",
"license": "bsd-3-clause",
"hash": 9174926787857535000,
"line_mean": 25.6283185841,
"line_max": 78,
"alpha_frac": 0.5973191537,
"autogenerated": false,
"ratio": 3.64874696847211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474606612217211,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
import datetime
import operator
import itertools
import warnings
from collections import Iterator
from functools import reduce
from datashape import discover, Tuple, Record, DataShape, var
from datashape.predicates import iscollection, isscalar, isrecord, istabular
from pandas import DataFrame, Series
import numpy as np
from odo import resource, odo
from odo.utils import ignoring
from odo.compatibility import unicode
from .expr import Expr, Symbol, ndim
from .dispatch import dispatch
from .compatibility import _strtypes
__all__ = ['Data', 'Table', 'into', 'to_html']
names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
with ignoring(ImportError):
import bcolz
not_an_iterator.append(bcolz.carray)
with ignoring(ImportError):
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
not_an_iterator.append(pymongo.database.Database)
def Data(data, dshape=None, name=None, fields=None, columns=None, schema=None,
**kwargs):
if columns:
raise ValueError("columns argument deprecated, use fields instead")
if schema and dshape:
raise ValueError("Please specify one of schema= or dshape= keyword"
" arguments")
if isinstance(data, _strtypes):
data = resource(data, schema=schema, dshape=dshape, columns=columns,
**kwargs)
if (isinstance(data, Iterator) and
not isinstance(data, tuple(not_an_iterator))):
data = tuple(data)
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if not dshape:
dshape = discover(data)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
ds = discover(data)
assert isrecord(ds.measure)
names = ds.measure.names
if names != fields:
raise ValueError('data column names %s\n'
'\tnot equal to fields parameter %s,\n'
'\tuse Data(data).relabel(%s) to rename '
'fields' % (names,
fields,
', '.join('%s=%r' % (k, v)
for k, v in
zip(names, fields))))
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
return InteractiveSymbol(data, ds, name)
class InteractiveSymbol(Symbol):
"""Interactive data.
The ``Data`` object presents a familiar view onto a variety of forms of
    data. This user-level object provides an interactive experience for using
Blaze's abstract expressions.
Parameters
----------
data : object
Any type with ``discover`` and ``compute`` implementations
fields : list, optional
Field or column names, will be inferred from datasource if possible
dshape : str or DataShape, optional
DataShape describing input data
name : str, optional
A name for the data.
Examples
--------
>>> t = Data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name
name
0 Bob
1 Edith
"""
__slots__ = 'data', 'dshape', '_name'
def __init__(self, data, dshape, name=None):
self.data = data
self.dshape = dshape
self._name = name or (next(names)
if isrecord(dshape.measure)
else None)
def _resources(self):
return {self: self.data}
@property
def _args(self):
return id(self.data), self.dshape, self._name
def __setstate__(self, state):
for slot, arg in zip(self.__slots__, state):
setattr(self, slot, arg)
Data.__doc__ = InteractiveSymbol.__doc__
def Table(*args, **kwargs):
""" Deprecated, see Data instead """
warnings.warn("Table is deprecated, use Data instead",
DeprecationWarning)
return Data(*args, **kwargs)
@dispatch(InteractiveSymbol, dict)
def _subs(o, d):
return o
@dispatch(Expr)
def compute(expr, **kwargs):
resources = expr._resources()
if not resources:
raise ValueError("No data resources found")
else:
return compute(expr, resources, **kwargs)
def concrete_head(expr, n=10):
""" Return head of computed expression """
if not expr._resources():
raise ValueError("Expression does not contain data resources")
if not iscollection(expr.dshape):
return compute(expr)
head = expr.head(n + 1)
if not iscollection(expr.dshape):
return odo(head, object)
elif isrecord(expr.dshape.measure):
return odo(head, DataFrame)
else:
df = odo(head, DataFrame)
df.columns = [expr._name]
return df
result = compute(head)
if len(result) == 0:
return DataFrame(columns=expr.fields)
if isrecord(expr.dshape.measure):
return odo(result, DataFrame, dshape=expr.dshape)
else:
df = odo(result, DataFrame, dshape=expr.dshape)
df.columns = [expr._name]
return df
def repr_tables(expr, n=10):
result = concrete_head(expr, n).rename(columns={None: ''})
if isinstance(result, (DataFrame, Series)):
s = repr(result)
if len(result) > 10:
s = '\n'.join(s.split('\n')[:-1]) + '\n...'
return s
else:
return repr(result) # pragma: no cover
def numel(shape):
if var in shape:
return None
if not shape:
return 1
return reduce(operator.mul, shape, 1)
def short_dshape(ds, nlines=5):
s = datashape.coretypes.pprint(ds)
lines = s.split('\n')
if len(lines) > 5:
s = '\n'.join(lines[:nlines]) + '\n ...'
return s
def coerce_to(typ, x):
try:
return typ(x)
except TypeError:
return odo(x, typ)
def coerce_scalar(result, dshape):
if 'float' in dshape:
return coerce_to(float, result)
elif 'int' in dshape:
return coerce_to(int, result)
elif 'bool' in dshape:
return coerce_to(bool, result)
elif 'datetime' in dshape:
return coerce_to(datetime.datetime, result)
elif 'date' in dshape:
return coerce_to(datetime.date, result)
else:
return result
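# Small worked example (added; not part of the original module): the dshape
# string, not the Python type of the result, selects the coercion target.
def _coerce_scalar_example():
    # 'float' appears in 'float64', so the plain int 3 comes back as 3.0 ...
    assert coerce_scalar(3, 'float64') == 3.0
    # ... while a numpy float is collapsed to a builtin int for 'int32'.
    assert coerce_scalar(np.float64(7.0), 'int32') == 7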
def expr_repr(expr, n=10):
# Pure Expressions, not interactive
if not expr._resources():
return str(expr)
# Scalars
if ndim(expr) == 0 and isscalar(expr.dshape):
return repr(coerce_scalar(compute(expr), str(expr.dshape)))
# Tables
if (ndim(expr) == 1 and (istabular(expr.dshape) or
isscalar(expr.dshape.measure))):
return repr_tables(expr, 10)
# Smallish arrays
if ndim(expr) >= 2 and numel(expr.shape) and numel(expr.shape) < 1000000:
return repr(compute(expr))
# Other
dat = expr._resources().values()
if len(dat) == 1:
dat = list(dat)[0] # may be dict_values
s = 'Data: %s' % dat
if not isinstance(expr, Symbol):
s += '\nExpr: %s' % str(expr)
s += '\nDataShape: %s' % short_dshape(expr.dshape, nlines=7)
return s
@dispatch(DataFrame)
def to_html(df):
return df.to_html()
@dispatch(Expr)
def to_html(expr):
# Tables
if not expr._resources() or ndim(expr) != 1:
return to_html(repr(expr))
return to_html(concrete_head(expr))
@dispatch(object)
def to_html(o):
return repr(o)
@dispatch(_strtypes)
def to_html(o):
return o.replace('\n', '<br>')
@dispatch((object, type, str, unicode), Expr)
def into(a, b, **kwargs):
result = compute(b, **kwargs)
kwargs['dshape'] = b.dshape
return into(a, result, **kwargs)
def table_length(expr):
try:
return expr._len()
except ValueError:
return compute(expr.count())
Expr.__repr__ = expr_repr
Expr._repr_html_ = lambda x: to_html(x)
Expr.__len__ = table_length
def intonumpy(data, dtype=None, **kwargs):
# TODO: Don't ignore other kwargs like copy
result = odo(data, np.ndarray)
if dtype and result.dtype != dtype:
result = result.astype(dtype)
return result
def convert_base(typ, x):
x = compute(x)
try:
return typ(x)
    except Exception:
return typ(odo(x, typ))
Expr.__array__ = intonumpy
Expr.__int__ = lambda x: convert_base(int, x)
Expr.__float__ = lambda x: convert_base(float, x)
Expr.__complex__ = lambda x: convert_base(complex, x)
Expr.__bool__ = lambda x: convert_base(bool, x)
Expr.__nonzero__ = lambda x: convert_base(bool, x)
Expr.__iter__ = into(Iterator)
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/interactive.py",
"copies": "1",
"size": "9540",
"license": "bsd-3-clause",
"hash": 8447383822870959000,
"line_mean": 26.652173913,
"line_max": 78,
"alpha_frac": 0.5850104822,
"autogenerated": false,
"ratio": 3.7149532710280373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9798219255106728,
"avg_score": 0.0003488996242619431,
"num_lines": 345
} |
from __future__ import absolute_import, division, print_function
import datashape
import blaze
from blaze.optional_packages import tables_is_here
import unittest
from blaze.catalog.tests.catalog_harness import CatalogHarness
from blaze.py2help import skipIf
class TestCatalog(unittest.TestCase):
def setUp(self):
self.cat = CatalogHarness()
blaze.catalog.load_config(self.cat.catfile)
def tearDown(self):
blaze.catalog.load_default()
self.cat.close()
def test_dir_traversal(self):
blaze.catalog.cd('/')
        self.assertEqual(blaze.catalog.cwd(), '/')
entities = ['csv_arr', 'json_arr', 'npy_arr', 'py_arr', 'subdir']
if tables_is_here:
entities.append('hdf5_arr')
entities.sort()
        self.assertEqual(blaze.catalog.ls(), entities)
arrays = ['csv_arr', 'json_arr', 'npy_arr', 'py_arr']
if tables_is_here:
arrays.append('hdf5_arr')
arrays.sort()
        self.assertEqual(blaze.catalog.ls_arrs(), arrays)
        self.assertEqual(blaze.catalog.ls_dirs(),
                         ['subdir'])
        blaze.catalog.cd('subdir')
        self.assertEqual(blaze.catalog.cwd(), '/subdir')
        self.assertEqual(blaze.catalog.ls(),
                         ['csv_arr2'])
def test_load_csv(self):
# Confirms that a simple csv file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('csv_arr')
ds = datashape.dshape('5, {Letter: string; Number: int32}')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [{'Letter': 'alpha', 'Number': 0},
{'Letter': 'beta', 'Number': 1},
{'Letter': 'gamma', 'Number': 2},
{'Letter': 'delta', 'Number': 3},
{'Letter': 'epsilon', 'Number': 4}])
def test_load_json(self):
# Confirms that a simple json file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('json_arr')
ds = datashape.dshape('2, var, int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [[1, 2, 3], [1, 2]])
@skipIf(not tables_is_here, 'pytables is not installed')
def test_load_hdf5(self):
# Confirms that a simple hdf5 array in a file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('hdf5_arr')
ds = datashape.dshape('2, 3, int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [[1, 2, 3], [3, 2, 1]])
def test_load_npy(self):
# Confirms that a simple npy file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('npy_arr')
ds = datashape.dshape('20, {idx: int32; val: string}')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual([x['idx'] for x in dat],
list(range(20)))
self.assertEqual([x['val'] for x in dat],
['yes', 'no'] * 10)
def test_load_py(self):
# Confirms that a simple py file can generate a blaze array
blaze.catalog.cd('/')
a = blaze.catalog.get('py_arr')
ds = datashape.dshape('5, int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [1, 2, 3, 4, 5])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "markflorisson/blaze-core",
"path": "blaze/catalog/tests/test_catalog.py",
"copies": "5",
"size": "3630",
"license": "bsd-3-clause",
"hash": -3648254729173832700,
"line_mean": 37.2105263158,
"line_max": 73,
"alpha_frac": 0.5608815427,
"autogenerated": false,
"ratio": 3.408450704225352,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003898635477582846,
"num_lines": 95
} |
from __future__ import absolute_import, division, print_function
import datetime as dt
import glob
import math
import os
import time
import zlib
import bitarray
import numpy as np
from six.moves import cPickle as pickle
from six.moves import range
from .bloomfilter import BloomFilter
from .hashfunctions import generate_hashfunctions
class DailyTemporalBloomFilter(object):
"""Long Range Temporal BloomFilter using a daily resolution.
For really high value of expiration (like 60 days) with low requirement on precision.
The actual error of this BF will the be native error of the BF + the error related
to the coarse aspect of the expiration, since we no longer expires information precisely.
Also, as opposed to a classic Bloom Filter, this one will aslo have false positive (reporting membership for a non-member)
AND false negative (reporting non-membership for a member).
The upper bound of the temporal_error can be theoricaly quite high. However, if the
items of the set are uniformly distributed over time, the avg error will be something like 1.0 / expiration
"""
def __init__(self, capacity, error_rate, expiration, name, snapshot_path):
self.error_rate = error_rate
self.capacity = capacity
self.nbr_slices = int(np.ceil(np.log2(1.0 / error_rate)))
self.bits_per_slice = int(np.ceil((capacity * abs(np.log(error_rate))) / (self.nbr_slices * (np.log(2) ** 2))))
self.nbr_bits = self.nbr_slices * self.bits_per_slice
self.initialize_bitarray()
self.count = 0
self.hashes = generate_hashfunctions(self.bits_per_slice, self.nbr_slices)
self.hashed_values = []
self.name = name
self.snapshot_path = snapshot_path
self.expiration = expiration
self.initialize_period()
self.snapshot_to_load = None
self.ready = False
self.warm_period = None
self.next_snapshot_load = time.time()
def initialize_bitarray(self):
"""Initialize both bitarray.
This BF contain two bit arrays instead of single one like a plain BF. bitarray
is the main bit array where all the historical items are stored. It's the one
used for the membership query. The second one, current_day_bitarray is the one
used for creating the daily snapshot.
"""
self.bitarray = bitarray.bitarray(self.nbr_bits)
self.current_day_bitarray = bitarray.bitarray(self.nbr_bits)
self.bitarray.setall(False)
self.current_day_bitarray.setall(False)
def __contains__(self, key):
"""Check membership."""
self.hashed_values = self.hashes(key)
offset = 0
for value in self.hashed_values:
if not self.bitarray[offset + value]:
return False
offset += self.bits_per_slice
return True
def add(self, key):
if key in self:
return True
offset = 0
if not self.hashed_values:
self.hashed_values = self.hashes(key)
for value in self.hashed_values:
self.bitarray[offset + value] = True
self.current_day_bitarray[offset + value] = True
offset += self.bits_per_slice
self.count += 1
return False
def initialize_period(self, period=None):
"""Initialize the period of BF.
:period: datetime.datetime for setting the period explicity.
"""
if not period:
self.current_period = dt.datetime.now()
else:
self.current_period = period
self.current_period = dt.datetime(self.current_period.year, self.current_period.month, self.current_period.day)
self.date = self.current_period.strftime("%Y-%m-%d")
def maintenance(self):
"""Expire the old element of the set.
Initialize a new bitarray and load the previous snapshot. Execute this guy
at the beginining of each day.
"""
self.initialize_period()
self.initialize_bitarray()
self.restore_from_disk()
def compute_refresh_period(self):
self.warm_period = (60 * 60 * 24) // (self.expiration-2)
def _should_warm(self):
return time.time() >= self.next_snapshot_load
def warm(self, jittering_ratio=0.2):
"""Progressively load the previous snapshot during the day.
Loading all the snapshots at once can takes a substantial amount of time. This method, if called
periodically during the day will progressively load those snapshots one by one. Because many workers are
going to use this method at the same time, we add a jittering to the period between load to avoid
hammering the disk at the same time.
"""
        if self.snapshot_to_load is None:
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
self.compute_refresh_period()
self.snapshot_to_load = []
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period >= last_period:
self.snapshot_to_load.append(filename)
self.ready = False
if self.snapshot_to_load and self._should_warm():
filename = self.snapshot_to_load.pop()
self._union_bf_from_file(filename)
jittering = self.warm_period * (np.random.random()-0.5) * jittering_ratio
self.next_snapshot_load = time.time() + self.warm_period + jittering
if not self.snapshot_to_load:
self.ready = True
def _union_bf_from_file(self, filename, current=False):
        with open(filename, 'rb') as f:
            snapshot = pickle.loads(zlib.decompress(f.read()))
if current:
self.current_day_bitarray = self.current_day_bitarray | snapshot
else:
self.bitarray = self.bitarray | snapshot
def restore_from_disk(self, clean_old_snapshot=False):
"""Restore the state of the BF using previous snapshots.
:clean_old_snapshot: Delete the old snapshot on the disk (period < current - expiration)
"""
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period < last_period and not clean_old_snapshot:
continue
else:
self._union_bf_from_file(filename)
if snapshot_period == self.current_period:
self._union_bf_from_file(filename, current=True)
if snapshot_period < last_period and clean_old_snapshot:
os.remove(filename)
self.ready = True
def save_snaphot(self):
"""Save the current state of the current day bitarray on disk.
Save the internal representation (bitarray) into a binary file using this format:
filename : name_expiration_2013-01-01.dat
"""
filename = "%s/%s_%s_%s.dat" % (self.snapshot_path, self.name, self.expiration, self.date)
        with open(filename, 'wb') as f:
f.write(zlib.compress(pickle.dumps(self.current_day_bitarray, protocol=pickle.HIGHEST_PROTOCOL)))
def union_current_day(self, bf):
"""Union only the current_day of an other BF."""
self.bitarray = self.bitarray | bf.current_day_bitarray
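# Illustrative operational sketch (added; not part of the original module):
# how a long-running worker might combine the methods above. The names and
# call order here are assumptions for demonstration only.
def _daily_workflow_sketch(snapshot_dir='./'):
    bf = DailyTemporalBloomFilter(10000, 0.01, 30, 'demo', snapshot_dir)
    bf.add('some-key')    # record membership for the current day
    bf.save_snaphot()     # persist today's bitarray to <name>_<expiration>_<date>.dat
    bf.maintenance()      # at day rollover: reset bitarrays and reload snapshots
    bf.warm()             # call periodically to merge older snapshots one at a time
    return 'some-key' in bf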
if __name__ == "__main__":
import numpy as np
bf = DailyTemporalBloomFilter(10000, 0.01, 30, 'test', './')
random_items = [str(r) for r in np.random.randn(20000)]
for item in random_items[:10000]:
bf.add(item)
false_positive = 0
for item in random_items[10000:20000]:
if item in bf:
false_positive += 1
print("Error rate (false positive): %s" % str(float(false_positive) / 10000))
| {
"repo_name": "Parsely/probably",
"path": "probably/temporal_daily.py",
"copies": "1",
"size": "8286",
"license": "mit",
"hash": -3118767756969751000,
"line_mean": 40.223880597,
"line_max": 126,
"alpha_frac": 0.6342022689,
"autogenerated": false,
"ratio": 3.9608030592734225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019318970682760982,
"num_lines": 201
} |
from __future__ import absolute_import, division, print_function
import datetime
from collections import Iterator
from itertools import islice, product
import os
import re
try:
from cytoolz import nth, unique, concat
except ImportError:
from toolz import nth, unique, concat
import numpy as np
# these are used throughout blaze, don't remove them
from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
import pandas as pd
import psutil
import sqlalchemy as sa
from toolz.curried import do
# Imports that replace older utils.
from .compatibility import map, zip
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True)
<itertools.islice object at ...>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
    Get from an N-dimensional getable
    Can index with elements, lists, or slices. Mimics numpy fancy indexing on
    generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
def listpack(x):
"""
>>> listpack(1)
[1]
>>> listpack((1, 2))
[1, 2]
>>> listpack([1, 2])
[1, 2]
"""
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
else:
return [x]
def normalize(s):
"""Normalize a sql expression for comparison in tests.
Parameters
----------
s : str or Selectable
The expression to normalize. If this is a selectable, it will be
compiled with literals inlined.
Returns
-------
cs : Any
An object that can be compared against another normalized sql
expression.
"""
if isinstance(s, sa.sql.Selectable):
s = literal_compile(s)
s = re.sub(r'(\(|\))', r' \1 ', s) # normalize spaces around parens
s = ' '.join(s.strip().split()).lower() # normalize whitespace and case
s = re.sub(r'(alias)_?\d*', r'\1', s) # normalize aliases
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
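# Minimal illustration (added; not part of the original module): queries that
# differ only in case and whitespace normalize to the same comparison string.
def _normalize_example():
    assert normalize('SELECT  A, B\nFROM   T') == normalize('select a, b from t')
    return normalize('SELECT  A, B\nFROM   T')  # -> 'select a, b from t'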
def literal_compile(s):
"""Compile a sql expression with bind params inlined as literals.
Parameters
----------
s : Selectable
The expression to compile.
Returns
-------
cs : str
An equivalent sql string.
"""
return str(s.compile(compile_kwargs={'literal_binds': True}))
def ordered_intersect(*sets):
"""Set intersection of two sequences that preserves order.
Parameters
----------
sets : tuple of Sequence
Returns
-------
generator
Examples
--------
>>> list(ordered_intersect('abcd', 'cdef'))
['c', 'd']
>>> list(ordered_intersect('bcda', 'bdfga'))
['b', 'd', 'a']
>>> list(ordered_intersect('zega', 'age')) # 1st sequence determines order
['e', 'g', 'a']
>>> list(ordered_intersect('gah', 'bag', 'carge'))
['g', 'a']
"""
common = frozenset.intersection(*map(frozenset, sets))
return (x for x in unique(concat(sets)) if x in common)
class attribute(object):
"""An attribute that can be overridden by instances.
    This is like a non-data descriptor property.
Parameters
----------
f : callable
The function to execute.
"""
def __init__(self, f):
self._f = f
def __get__(self, instance, owner):
if instance is None:
return self
return self._f(instance)
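# Usage sketch for the ``attribute`` descriptor above (added; illustrative
# only): the computed value can be shadowed by assigning on an instance,
# which a regular ``property`` would forbid.
class _AttributeDemo(object):
    def __init__(self, base):
        self.base = base
    @attribute
    def doubled(self):
        return self.base * 2
# _AttributeDemo(3).doubled == 6; setting instance.doubled = 10 overrides it
# per-instance because ``attribute`` defines no __set__.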
def parameter_space(*args):
"""Unpack a sequence of positional parameter spaces into the product of each
space.
Parameters
----------
*args
The parameters spaces to create a product of.
Returns
-------
param_space : tuple[tuple]
The product of each of the spaces.
Examples
--------
# trivial case
>>> parameter_space(0, 1, 2)
((0, 1, 2),)
# two 2-tuples
>>> parameter_space((0, 1), (2, 3))
((0, 2), (0, 3), (1, 2), (1, 3))
Notes
-----
    This is a convenience for passing to :func:`pytest.mark.parametrize`
"""
return tuple(product(*(
arg if isinstance(arg, tuple) else (arg,) for arg in args
)))
def as_attribute(ob, name=None):
"""Decorator to define an object as an attribute of another object.
Parameters
----------
ob : any
The object to attach this to.
name : str, optional
The name of the attribute. By default this is the decorated value's
``__name__``.
Returns
-------
dec : callable[any, any]
Decorator that registers an object as an attribute of another object and
returns it unchanged.
"""
return do(lambda f: setattr(ob, f.__name__ if name is None else name, f))
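# Illustrative use of ``as_attribute`` (added; not part of the original
# module): attach a helper onto an existing class; the decorated function is
# returned unchanged thanks to ``do``.
class _AsAttributeTarget(object):
    pass
@as_attribute(_AsAttributeTarget, name='greet')
def _greet(self):
    return 'hello'
# _AsAttributeTarget().greet() == 'hello'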
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/utils.py",
"copies": "3",
"size": "7079",
"license": "bsd-3-clause",
"hash": -135684928601881540,
"line_mean": 23.4948096886,
"line_max": 80,
"alpha_frac": 0.5633564063,
"autogenerated": false,
"ratio": 3.6508509541000516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5714207360400051,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
from functools import partial
from datashape import Mono, DataShape
import pandas as pd
from blaze.dispatch import dispatch
from blaze.compatibility import unicode
json_dumps_ns = dict()
dispatch = partial(dispatch, namespace=json_dumps_ns)
@dispatch(datetime.datetime)
def json_dumps(dt):
if dt is pd.NaT:
# NaT has an isoformat but it is totally invalid.
# This keeps the parsing on the client side simple.
s = u'NaT'
else:
s = dt.isoformat()
if isinstance(s, bytes):
s = s.decode('utf-8')
return {u'__!datetime': s}
@dispatch(frozenset)
def json_dumps(ds):
return {u'__!frozenset': list(ds)}
@dispatch(datetime.timedelta)
def json_dumps(ds):
return {u'__!timedelta': ds.total_seconds()}
@dispatch(Mono)
def json_dumps(m):
return {u'__!mono': unicode(m)}
@dispatch(DataShape)
def json_dumps(ds):
return {u'__!datashape': unicode(ds)}
@dispatch(object)
def json_dumps(ob):
return pd.io.packers.encode(ob)
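# Hedged sketch (added; not part of the original module): one way the
# dispatched encoders above could be plugged into the stdlib ``json`` module.
# The blaze server has its own serialization layer; this is illustration only.
def _dumps_with_hooks(obj):
    import json
    # ``default`` is called for objects the stdlib encoder cannot handle; the
    # dispatched ``json_dumps`` turns them into tagged, JSON-friendly dicts.
    return json.dumps(obj, default=json_dumps)
# _dumps_with_hooks({'when': datetime.datetime(2015, 1, 1)})
# -> '{"when": {"__!datetime": "2015-01-01T00:00:00"}}'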
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/serialization/json_dumps.py",
"copies": "3",
"size": "1089",
"license": "bsd-3-clause",
"hash": 6788164172767670000,
"line_mean": 19.5471698113,
"line_max": 64,
"alpha_frac": 0.6721763085,
"autogenerated": false,
"ratio": 3.330275229357798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 53
} |
from __future__ import absolute_import, division, print_function
import datetime
import hashlib
import json
import re
import semantic_version
from appr.exception import (InvalidUsage, InvalidRelease, PackageAlreadyExists,
PackageReleaseNotFound, raise_package_not_found)
from appr.models.blob_base import BlobBase
from appr.semver import last_version, select_version
SCHEMA_VERSION = "v0"
def get_media_type(mediatype):
if mediatype:
match = re.match(r"application/vnd\.appr\.package-manifest\.(.+?)\.(.+).json", mediatype)
if match:
mediatype = match.group(1)
return mediatype
def content_media_type(media_type):
return "application/vnd.appr.package.%s.%s.tar+gzip" % (media_type, SCHEMA_VERSION)
def manifest_media_type(media_type):
return "application/vnd.appr.package-manifest.%s.%s.json" % (get_media_type(media_type),
SCHEMA_VERSION)
def digest_manifest(manifest):
return hashlib.sha256(json.dumps(manifest, sort_keys=True)).hexdigest()
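# Quick sanity check (added; not part of the original module) for the
# media-type helpers above, using a hypothetical "helm" media type.
def _media_type_helpers_sketch():
    full = "application/vnd.appr.package-manifest.helm.v0.json"
    assert get_media_type(full) == "helm"
    assert manifest_media_type("helm") == full
    assert content_media_type("helm") == \
        "application/vnd.appr.package.helm.v0.tar+gzip"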
class PackageBase(object):
def __init__(self, package_name, release=None, media_type=None, blob=None, metadata=None):
self.package = package_name
self.media_type = get_media_type(media_type)
self.namespace, self.name = package_name.split("/")
self.release = release
self._data = None
self.created_at = None
self.packager = None
self._blob = None
self._blob_size = 0
self._digest = None
self._blob = None
self.metadata = metadata
self.blob = blob
@property
def blob(self):
return self._blob
@blob.setter
def blob(self, value):
if value is not None:
if not isinstance(value, BlobBase):
raise ValueError("blob must be a BlobBase instance")
self._blob = value
def channels(self, channel_class, iscurrent=True):
""" Returns all available channels for a package """
channels = channel_class.all(self.package)
result = []
# yapf:disable
for channel in channels:
if ((iscurrent and channel.current == self.release) or
(not iscurrent and self.release in channel.releases())):
result.append(channel.name)
# yapf:enable
return result
@property
def digest(self):
if not self._digest and self.blob:
self._digest = self.blob.digest
return self._digest
@property
def blob_size(self):
if not self._blob_size and self.blob:
self._blob_size = self.blob.size
return self._blob_size
@property
def content_media_type(self):
return content_media_type(self.media_type)
@property
def manifest_media_type(self):
return manifest_media_type(self.media_type)
def content_descriptor(self):
return {
"mediaType": self.content_media_type,
"size": self.blob_size,
"digest": self.digest,
"urls": []}
@classmethod
def view_manifests(cls, package_name, release, manifest_only=False, media_type=None):
res = []
for mtype in cls.manifests(package_name, release):
if media_type is not None and media_type != mtype:
continue
package = cls.get(package_name, release, mtype)
if manifest_only:
res.append(package.manifest())
else:
res.append(package.data)
return res
def manifest(self):
manifest = {"mediaType": self.manifest_media_type, "content": self.content_descriptor()}
return manifest
@classmethod
def view_releases(cls, package, media_type=None):
return [
item
for release in cls.all_releases(package, media_type=media_type)
for item in cls.view_manifests(package, release, False, media_type=media_type)]
@property
def data(self):
if self._data is None:
self._data = {'created_at': datetime.datetime.utcnow().isoformat()}
d = {
"package": self.package,
"release": self.release,
"metadata": self.metadata,
"mediaType": self.manifest_media_type,
"content": self.content_descriptor()}
self._data.update(d)
return self._data
@data.setter
def data(self, data):
self._data = data
self.created_at = data['created_at']
self.metadata = data.get('metadata', None)
self.release = data['release']
self._digest = data['content']['digest']
self._blob_size = data['content']['size']
self.media_type = get_media_type(data['mediaType'])
@classmethod
def check_release(cls, release):
try:
semantic_version.Version(release)
except ValueError as e:
raise InvalidRelease(str(e), {"version": release})
return None
@classmethod
def _find_media_type(cls, package, release):
manifests = cls.manifests(package, release)
if len(manifests) != 1:
raise InvalidUsage("media-type non specified: [%s]" % ','.join(manifests))
else:
return manifests[0]
@classmethod
def get(cls, package, release, media_type):
"""
package: string following "namespace/package_name" format
        release: release query. If None, return the latest release
        returns: (package blob (tar.gz) encoded in base64, release)
"""
p = cls(package, release)
p.pull(release, media_type)
return p
@classmethod
def get_release(cls, package, release_query, stable=False):
releases = cls.all_releases(package)
if not releases:
raise_package_not_found(package, release=release_query)
if release_query is None or release_query == 'default':
return last_version(releases, stable)
else:
try:
return select_version(releases, str(release_query), stable)
except ValueError as e:
raise InvalidRelease(e.message, {"release": release_query})
def pull(self, release_query=None, media_type=None):
# Find release
if release_query is None:
release_query = self.release
package = self.package
release = self.get_release(package, release_query)
if release is None:
raise PackageReleaseNotFound("No release match '%s' for package '%s'" % (release_query,
package),
{"package": package,
"release_query": release_query})
# Find media_type
if media_type == "-":
media_type = self._find_media_type(package, str(release))
media_type = get_media_type(media_type)
if media_type is None:
media_type = self.media_type
self.data = self._fetch(package, str(release), media_type)
return self
def save(self, force=False, **kwargs):
self.check_release(self.release)
if self.isdeleted_release(self.package, self.release) and not force:
raise PackageAlreadyExists("Package release %s existed" % self.package, {
"package": self.package,
"release": self.release})
self.blob.save(self.content_media_type)
self._save(force, **kwargs)
def releases(self):
return self.all_releases(self.package)
@classmethod
def delete(cls, package, release, media_type):
cls._delete(package, release, media_type)
def _save(self, force=False, **kwargs):
raise NotImplementedError
@classmethod
def all(cls, namespace=None, **kwargs):
raise NotImplementedError
@classmethod
def _fetch(cls, package, release, media_type):
raise NotImplementedError
@classmethod
def _delete(cls, package, release, media_type):
raise NotImplementedError
@classmethod
def all_releases(cls, package, media_type=None):
raise NotImplementedError
@classmethod
def search(cls, query, **kwargs):
raise NotImplementedError
@classmethod
def isdeleted_release(cls, package, release):
raise NotImplementedError
@classmethod
def reindex(cls):
raise NotImplementedError
@classmethod
def dump_all(cls, blob_cls):
""" produce a dict with all packages """
raise NotImplementedError
@classmethod
def manifests(cls, package, release):
""" Returns an array of string """
raise NotImplementedError
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/package_base.py",
"copies": "2",
"size": "8827",
"license": "apache-2.0",
"hash": -8093664008787529000,
"line_mean": 31.6925925926,
"line_max": 99,
"alpha_frac": 0.5963521015,
"autogenerated": false,
"ratio": 4.268375241779497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5864727343279497,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
import json
from copy import deepcopy
import stripe
from stripe import api_requestor, util, six
def _compute_diff(current, previous):
if isinstance(current, dict):
previous = previous or {}
diff = current.copy()
for key in set(previous.keys()) - set(diff.keys()):
diff[key] = ""
return diff
return current if current is not None else ""
def _serialize_list(array, previous):
array = array or []
previous = previous or []
params = {}
for i, v in enumerate(array):
previous_item = previous[i] if len(previous) > i else None
if hasattr(v, "serialize"):
params[str(i)] = v.serialize(previous_item)
else:
params[str(i)] = _compute_diff(v, previous_item)
return params
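# Worked example (added; not part of the original library) for the diff
# helpers above: keys that disappeared since the previous state are sent as
# empty strings so the API unsets them; plain values pass through unchanged.
def _diff_helpers_example():
    assert _compute_diff({"a": 1}, {"a": 1, "b": 2}) == {"a": 1, "b": ""}
    assert _compute_diff(None, "old") == ""
    assert _serialize_list(["x"], []) == {"0": "x"}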
class StripeObject(dict):
class ReprJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return api_requestor._encode_datetime(obj)
return super(StripeObject.ReprJSONEncoder, self).default(obj)
def __init__(
self,
id=None,
api_key=None,
stripe_version=None,
stripe_account=None,
last_response=None,
**params
):
super(StripeObject, self).__init__()
self._unsaved_values = set()
self._transient_values = set()
self._last_response = last_response
self._retrieve_params = params
self._previous = None
object.__setattr__(self, "api_key", api_key)
object.__setattr__(self, "stripe_version", stripe_version)
object.__setattr__(self, "stripe_account", stripe_account)
if id:
self["id"] = id
@property
def last_response(self):
return self._last_response
def update(self, update_dict):
for k in update_dict:
self._unsaved_values.add(k)
return super(StripeObject, self).update(update_dict)
def __setattr__(self, k, v):
if k[0] == "_" or k in self.__dict__:
return super(StripeObject, self).__setattr__(k, v)
self[k] = v
return None
def __getattr__(self, k):
if k[0] == "_":
raise AttributeError(k)
try:
return self[k]
except KeyError as err:
raise AttributeError(*err.args)
def __delattr__(self, k):
if k[0] == "_" or k in self.__dict__:
return super(StripeObject, self).__delattr__(k)
else:
del self[k]
def __setitem__(self, k, v):
if v == "":
raise ValueError(
"You cannot set %s to an empty string. "
"We interpret empty strings as None in requests."
"You may set %s.%s = None to delete the property"
% (k, str(self), k)
)
# Allows for unpickling in Python 3.x
if not hasattr(self, "_unsaved_values"):
self._unsaved_values = set()
self._unsaved_values.add(k)
super(StripeObject, self).__setitem__(k, v)
def __getitem__(self, k):
try:
return super(StripeObject, self).__getitem__(k)
except KeyError as err:
if k in self._transient_values:
raise KeyError(
"%r. HINT: The %r attribute was set in the past."
"It was then wiped when refreshing the object with "
"the result returned by Stripe's API, probably as a "
"result of a save(). The attributes currently "
"available on this object are: %s"
% (k, k, ", ".join(list(self.keys())))
)
else:
raise err
def __delitem__(self, k):
super(StripeObject, self).__delitem__(k)
# Allows for unpickling in Python 3.x
if hasattr(self, "_unsaved_values") and k in self._unsaved_values:
self._unsaved_values.remove(k)
# Custom unpickling method that uses `update` to update the dictionary
# without calling __setitem__, which would fail if any value is an empty
# string
def __setstate__(self, state):
self.update(state)
# Custom pickling method to ensure the instance is pickled as a custom
# class and not as a dict, otherwise __setstate__ would not be called when
# unpickling.
def __reduce__(self):
reduce_value = (
type(self), # callable
( # args
self.get("id", None),
self.api_key,
self.stripe_version,
self.stripe_account,
),
dict(self), # state
)
return reduce_value
@classmethod
def construct_from(
cls,
values,
key,
stripe_version=None,
stripe_account=None,
last_response=None,
):
instance = cls(
values.get("id"),
api_key=key,
stripe_version=stripe_version,
stripe_account=stripe_account,
last_response=last_response,
)
instance.refresh_from(
values,
api_key=key,
stripe_version=stripe_version,
stripe_account=stripe_account,
last_response=last_response,
)
return instance
def refresh_from(
self,
values,
api_key=None,
partial=False,
stripe_version=None,
stripe_account=None,
last_response=None,
):
self.api_key = api_key or getattr(values, "api_key", None)
self.stripe_version = stripe_version or getattr(
values, "stripe_version", None
)
self.stripe_account = stripe_account or getattr(
values, "stripe_account", None
)
self._last_response = last_response or getattr(
values, "_last_response", None
)
# Wipe old state before setting new. This is useful for e.g.
# updating a customer, where there is no persistent card
# parameter. Mark those values which don't persist as transient
if partial:
self._unsaved_values = self._unsaved_values - set(values)
else:
removed = set(self.keys()) - set(values)
self._transient_values = self._transient_values | removed
self._unsaved_values = set()
self.clear()
self._transient_values = self._transient_values - set(values)
for k, v in six.iteritems(values):
super(StripeObject, self).__setitem__(
k,
util.convert_to_stripe_object(
v, api_key, stripe_version, stripe_account
),
)
self._previous = values
@classmethod
def api_base(cls):
return None
def request(self, method, url, params=None, headers=None):
if params is None:
params = self._retrieve_params
requestor = api_requestor.APIRequestor(
key=self.api_key,
api_base=self.api_base(),
api_version=self.stripe_version,
account=self.stripe_account,
)
response, api_key = requestor.request(method, url, params, headers)
return util.convert_to_stripe_object(
response, api_key, self.stripe_version, self.stripe_account
)
def request_stream(self, method, url, params=None, headers=None):
if params is None:
params = self._retrieve_params
requestor = api_requestor.APIRequestor(
key=self.api_key,
api_base=self.api_base(),
api_version=self.stripe_version,
account=self.stripe_account,
)
response, _ = requestor.request_stream(method, url, params, headers)
return response
def __repr__(self):
ident_parts = [type(self).__name__]
if isinstance(self.get("object"), six.string_types):
ident_parts.append(self.get("object"))
if isinstance(self.get("id"), six.string_types):
ident_parts.append("id=%s" % (self.get("id"),))
unicode_repr = "<%s at %s> JSON: %s" % (
" ".join(ident_parts),
hex(id(self)),
str(self),
)
if six.PY2:
return unicode_repr.encode("utf-8")
else:
return unicode_repr
def __str__(self):
return json.dumps(
self.to_dict_recursive(),
sort_keys=True,
indent=2,
cls=self.ReprJSONEncoder,
)
def to_dict(self):
return dict(self)
def to_dict_recursive(self):
def maybe_to_dict_recursive(value):
if value is None:
return None
elif isinstance(value, StripeObject):
return value.to_dict_recursive()
else:
return value
return {
key: list(map(maybe_to_dict_recursive, value))
if isinstance(value, list)
else maybe_to_dict_recursive(value)
for key, value in six.iteritems(dict(self))
}
@property
def stripe_id(self):
return self.id
def serialize(self, previous):
params = {}
unsaved_keys = self._unsaved_values or set()
previous = previous or self._previous or {}
for k, v in six.iteritems(self):
if k == "id" or (isinstance(k, str) and k.startswith("_")):
continue
elif isinstance(v, stripe.api_resources.abstract.APIResource):
continue
elif hasattr(v, "serialize"):
child = v.serialize(previous.get(k, None))
if child != {}:
params[k] = child
elif k in unsaved_keys:
params[k] = _compute_diff(v, previous.get(k, None))
elif k == "additional_owners" and v is not None:
params[k] = _serialize_list(v, previous.get(k, None))
return params
# This class overrides __setitem__ to throw exceptions on inputs that it
# doesn't like. This can cause problems when we try to copy an object
# wholesale because some data that's returned from the API may not be valid
# if it was set to be set manually. Here we override the class' copy
# arguments so that we can bypass these possible exceptions on __setitem__.
def __copy__(self):
copied = StripeObject(
self.get("id"),
self.api_key,
stripe_version=self.stripe_version,
stripe_account=self.stripe_account,
)
copied._retrieve_params = self._retrieve_params
for k, v in six.iteritems(self):
# Call parent's __setitem__ to avoid checks that we've added in the
# overridden version that can throw exceptions.
super(StripeObject, copied).__setitem__(k, v)
return copied
# This class overrides __setitem__ to throw exceptions on inputs that it
# doesn't like. This can cause problems when we try to copy an object
# wholesale because some data that's returned from the API may not be valid
# if it was set to be set manually. Here we override the class' copy
# arguments so that we can bypass these possible exceptions on __setitem__.
def __deepcopy__(self, memo):
copied = self.__copy__()
memo[id(self)] = copied
for k, v in six.iteritems(self):
# Call parent's __setitem__ to avoid checks that we've added in the
# overridden version that can throw exceptions.
super(StripeObject, copied).__setitem__(k, deepcopy(v, memo))
return copied
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/stripe_object.py",
"copies": "1",
"size": "11864",
"license": "mit",
"hash": -1205466749329961000,
"line_mean": 31.0648648649,
"line_max": 79,
"alpha_frac": 0.5533546864,
"autogenerated": false,
"ratio": 4.2041105598866055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5257465246286606,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
import json
import tempfile
import uuid
from collections import OrderedDict
import pytest
import stripe
from stripe import six, util
from stripe.stripe_response import StripeResponse, StripeStreamResponse
from stripe.six.moves.urllib.parse import urlsplit
import urllib3
VALID_API_METHODS = ("get", "post", "delete")
class GMT1(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=1)
def dst(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "Europe/Prague"
class APIHeaderMatcher(object):
EXP_KEYS = [
"Authorization",
"Stripe-Version",
"User-Agent",
"X-Stripe-Client-User-Agent",
]
METHOD_EXTRA_KEYS = {"post": ["Content-Type", "Idempotency-Key"]}
def __init__(
self,
api_key=None,
extra={},
request_method=None,
user_agent=None,
app_info=None,
idempotency_key=None,
):
self.request_method = request_method
self.api_key = api_key or stripe.api_key
self.extra = extra
self.user_agent = user_agent
self.app_info = app_info
self.idempotency_key = idempotency_key
def __eq__(self, other):
return (
self._keys_match(other)
and self._auth_match(other)
and self._user_agent_match(other)
and self._x_stripe_ua_contains_app_info(other)
and self._idempotency_key_match(other)
and self._extra_match(other)
)
def __repr__(self):
return (
"APIHeaderMatcher(request_method=%s, api_key=%s, extra=%s, "
"user_agent=%s, app_info=%s, idempotency_key=%s)"
% (
repr(self.request_method),
repr(self.api_key),
repr(self.extra),
repr(self.user_agent),
repr(self.app_info),
repr(self.idempotency_key),
)
)
def _keys_match(self, other):
expected_keys = list(set(self.EXP_KEYS + list(self.extra.keys())))
if (
self.request_method is not None
and self.request_method in self.METHOD_EXTRA_KEYS
):
expected_keys.extend(self.METHOD_EXTRA_KEYS[self.request_method])
return sorted(other.keys()) == sorted(expected_keys)
def _auth_match(self, other):
return other["Authorization"] == "Bearer %s" % (self.api_key,)
def _user_agent_match(self, other):
if self.user_agent is not None:
return other["User-Agent"] == self.user_agent
return True
def _idempotency_key_match(self, other):
if self.idempotency_key is not None:
return other["Idempotency-Key"] == self.idempotency_key
return True
def _x_stripe_ua_contains_app_info(self, other):
if self.app_info:
ua = json.loads(other["X-Stripe-Client-User-Agent"])
if "application" not in ua:
return False
return ua["application"] == self.app_info
return True
def _extra_match(self, other):
for k, v in six.iteritems(self.extra):
if other[k] != v:
return False
return True
class QueryMatcher(object):
def __init__(self, expected):
self.expected = sorted(expected)
def __eq__(self, other):
query = urlsplit(other).query or other
parsed = stripe.util.parse_qsl(query)
return self.expected == sorted(parsed)
def __repr__(self):
return "QueryMatcher(expected=%s)" % (repr(self.expected))
class UrlMatcher(object):
def __init__(self, expected):
self.exp_parts = urlsplit(expected)
def __eq__(self, other):
other_parts = urlsplit(other)
for part in ("scheme", "netloc", "path", "fragment"):
expected = getattr(self.exp_parts, part)
actual = getattr(other_parts, part)
if expected != actual:
print(
'Expected %s "%s" but got "%s"' % (part, expected, actual)
)
return False
q_matcher = QueryMatcher(stripe.util.parse_qsl(self.exp_parts.query))
return q_matcher == other
def __repr__(self):
return "UrlMatcher(exp_parts=%s)" % (repr(self.exp_parts))
class AnyUUID4Matcher(object):
def __eq__(self, other):
try:
uuid.UUID(other, version=4)
except ValueError:
return False
return True
def __repr__(self):
return "AnyUUID4Matcher()"
class TestAPIRequestor(object):
ENCODE_INPUTS = {
"dict": {
"astring": "bar",
"anint": 5,
"anull": None,
"adatetime": datetime.datetime(2013, 1, 1, tzinfo=GMT1()),
"atuple": (1, 2),
"adict": {"foo": "bar", "boz": 5},
"alist": ["foo", "bar"],
},
"list": [1, "foo", "baz"],
"string": "boo",
"unicode": u"\u1234",
"datetime": datetime.datetime(2013, 1, 1, second=1, tzinfo=GMT1()),
"none": None,
}
ENCODE_EXPECTATIONS = {
"dict": [
("%s[astring]", "bar"),
("%s[anint]", 5),
("%s[adatetime]", 1356994800),
("%s[adict][foo]", "bar"),
("%s[adict][boz]", 5),
("%s[alist][0]", "foo"),
("%s[alist][1]", "bar"),
("%s[atuple][0]", 1),
("%s[atuple][1]", 2),
],
"list": [("%s[0]", 1), ("%s[1]", "foo"), ("%s[2]", "baz")],
"string": [("%s", "boo")],
"unicode": [("%s", stripe.util.utf8(u"\u1234"))],
"datetime": [("%s", 1356994801)],
"none": [],
}
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {
"api_key": stripe.api_key,
"api_version": stripe.api_version,
"default_http_client": stripe.default_http_client,
"enable_telemetry": stripe.enable_telemetry,
}
stripe.api_key = "sk_test_123"
stripe.api_version = "2017-12-14"
stripe.default_http_client = None
stripe.enable_telemetry = False
yield
stripe.api_key = orig_attrs["api_key"]
stripe.api_version = orig_attrs["api_version"]
stripe.default_http_client = orig_attrs["default_http_client"]
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
@pytest.fixture
def http_client(self, mocker):
http_client = mocker.Mock(stripe.http_client.HTTPClient)
http_client._verify_ssl_certs = True
http_client.name = "mockclient"
return http_client
@pytest.fixture
def requestor(self, http_client):
requestor = stripe.api_requestor.APIRequestor(client=http_client)
return requestor
@pytest.fixture
def mock_response(self, mocker, http_client):
def mock_response(return_body, return_code, headers=None):
http_client.request_with_retries = mocker.Mock(
return_value=(return_body, return_code, headers or {})
)
return mock_response
@pytest.fixture
def mock_streaming_response(self, mocker, http_client):
def mock_streaming_response(return_body, return_code, headers=None):
http_client.request_stream_with_retries = mocker.Mock(
return_value=(return_body, return_code, headers or {})
)
return mock_streaming_response
@pytest.fixture
def check_call(self, http_client):
def check_call(
method,
abs_url=None,
headers=None,
post_data=None,
is_streaming=False,
):
if not abs_url:
abs_url = "%s%s" % (stripe.api_base, self.valid_path)
if not headers:
headers = APIHeaderMatcher(request_method=method)
if is_streaming:
http_client.request_stream_with_retries.assert_called_with(
method, abs_url, headers, post_data
)
else:
http_client.request_with_retries.assert_called_with(
method, abs_url, headers, post_data
)
return check_call
@property
def valid_path(self):
return "/foo"
def encoder_check(self, key):
stk_key = "my%s" % (key,)
value = self.ENCODE_INPUTS[key]
expectation = [
(k % (stk_key,), v) for k, v in self.ENCODE_EXPECTATIONS[key]
]
stk = []
fn = getattr(stripe.api_requestor.APIRequestor, "encode_%s" % (key,))
fn(stk, stk_key, value)
if isinstance(value, dict):
expectation.sort()
stk.sort()
assert stk == expectation, stk
def _test_encode_naive_datetime(self):
stk = []
stripe.api_requestor.APIRequestor.encode_datetime(
stk, "test", datetime.datetime(2013, 1, 1)
)
        # Naive datetimes will encode differently depending on your system's
        # local time. Since we don't know the local time of your system,
# we just check that naive encodings are within 24 hours of correct.
assert abs(stk[0][1] - 1356994800) <= 60 * 60 * 24
def test_param_encoding(self, requestor, mock_response, check_call):
mock_response("{}", 200)
requestor.request("get", "", self.ENCODE_INPUTS)
expectation = []
for type_, values in six.iteritems(self.ENCODE_EXPECTATIONS):
expectation.extend([(k % (type_,), str(v)) for k, v in values])
check_call("get", QueryMatcher(expectation))
def test_dictionary_list_encoding(self):
params = {"foo": {"0": {"bar": "bat"}}}
encoded = list(stripe.api_requestor._api_encode(params))
key, value = encoded[0]
assert key == "foo[0][bar]"
assert value == "bat"
def test_ordereddict_encoding(self):
params = {
"ordered": OrderedDict(
[
("one", 1),
("two", 2),
("three", 3),
("nested", OrderedDict([("a", "a"), ("b", "b")])),
]
)
}
encoded = list(stripe.api_requestor._api_encode(params))
assert encoded[0][0] == "ordered[one]"
assert encoded[1][0] == "ordered[two]"
assert encoded[2][0] == "ordered[three]"
assert encoded[3][0] == "ordered[nested][a]"
assert encoded[4][0] == "ordered[nested][b]"
def test_url_construction(self, requestor, mock_response, check_call):
CASES = (
("%s?foo=bar" % stripe.api_base, "", {"foo": "bar"}),
("%s?foo=bar" % stripe.api_base, "?", {"foo": "bar"}),
(stripe.api_base, "", {}),
(
"%s/%%20spaced?foo=bar%%24&baz=5" % stripe.api_base,
"/%20spaced?foo=bar%24",
{"baz": "5"},
),
(
"%s?foo=bar&foo=bar" % stripe.api_base,
"?foo=bar",
{"foo": "bar"},
),
)
for expected, url, params in CASES:
mock_response("{}", 200)
requestor.request("get", url, params)
check_call("get", expected)
def test_empty_methods(self, requestor, mock_response, check_call):
for meth in VALID_API_METHODS:
mock_response("{}", 200)
resp, key = requestor.request(meth, self.valid_path, {})
if meth == "post":
post_data = ""
else:
post_data = None
check_call(meth, post_data=post_data)
assert isinstance(resp, StripeResponse)
assert resp.data == {}
assert resp.data == json.loads(resp.body)
def test_empty_methods_streaming_response(
self, requestor, mock_streaming_response, check_call
):
for meth in VALID_API_METHODS:
mock_streaming_response(util.io.BytesIO(b"thisisdata"), 200)
resp, key = requestor.request_stream(
meth,
self.valid_path,
{},
)
if meth == "post":
post_data = ""
else:
post_data = None
check_call(meth, post_data=post_data, is_streaming=True)
assert isinstance(resp, StripeStreamResponse)
assert resp.io.getvalue() == b"thisisdata"
def test_methods_with_params_and_response(
self, requestor, mock_response, check_call
):
for method in VALID_API_METHODS:
mock_response('{"foo": "bar", "baz": 6}', 200)
params = {
"alist": [1, 2, 3],
"adict": {"frobble": "bits"},
"adatetime": datetime.datetime(2013, 1, 1, tzinfo=GMT1()),
}
encoded = (
"adict[frobble]=bits&adatetime=1356994800&"
"alist[0]=1&alist[1]=2&alist[2]=3"
)
resp, key = requestor.request(method, self.valid_path, params)
assert isinstance(resp, StripeResponse)
assert resp.data == {"foo": "bar", "baz": 6}
assert resp.data == json.loads(resp.body)
if method == "post":
check_call(
method,
post_data=QueryMatcher(stripe.util.parse_qsl(encoded)),
)
else:
abs_url = "%s%s?%s" % (
stripe.api_base,
self.valid_path,
encoded,
)
check_call(method, abs_url=UrlMatcher(abs_url))
def test_methods_with_params_and_streaming_response(
self, requestor, mock_streaming_response, check_call
):
for method in VALID_API_METHODS:
mock_streaming_response(
util.io.BytesIO(b'{"foo": "bar", "baz": 6}'), 200
)
params = {
"alist": [1, 2, 3],
"adict": {"frobble": "bits"},
"adatetime": datetime.datetime(2013, 1, 1, tzinfo=GMT1()),
}
encoded = (
"adict[frobble]=bits&adatetime=1356994800&"
"alist[0]=1&alist[1]=2&alist[2]=3"
)
resp, key = requestor.request_stream(
method,
self.valid_path,
params,
)
assert isinstance(resp, StripeStreamResponse)
assert resp.io.getvalue() == b'{"foo": "bar", "baz": 6}'
if method == "post":
check_call(
method,
post_data=QueryMatcher(stripe.util.parse_qsl(encoded)),
is_streaming=True,
)
else:
abs_url = "%s%s?%s" % (
stripe.api_base,
self.valid_path,
encoded,
)
check_call(
method, abs_url=UrlMatcher(abs_url), is_streaming=True
)
def test_uses_headers(self, requestor, mock_response, check_call):
mock_response("{}", 200)
requestor.request("get", self.valid_path, {}, {"foo": "bar"})
check_call("get", headers=APIHeaderMatcher(extra={"foo": "bar"}))
def test_uses_instance_key(self, http_client, mock_response, check_call):
key = "fookey"
requestor = stripe.api_requestor.APIRequestor(key, client=http_client)
mock_response("{}", 200)
resp, used_key = requestor.request("get", self.valid_path, {})
check_call("get", headers=APIHeaderMatcher(key, request_method="get"))
assert used_key == key
def test_uses_instance_api_version(
self, http_client, mock_response, check_call
):
api_version = "fooversion"
requestor = stripe.api_requestor.APIRequestor(
api_version=api_version, client=http_client
)
mock_response("{}", 200)
requestor.request("get", self.valid_path, {})
check_call(
"get",
headers=APIHeaderMatcher(
extra={"Stripe-Version": "fooversion"}, request_method="get"
),
)
def test_uses_instance_account(
self, http_client, mock_response, check_call
):
account = "acct_foo"
requestor = stripe.api_requestor.APIRequestor(
account=account, client=http_client
)
mock_response("{}", 200)
requestor.request("get", self.valid_path, {})
check_call(
"get",
headers=APIHeaderMatcher(
extra={"Stripe-Account": account}, request_method="get"
),
)
def test_sets_default_http_client(self, http_client):
assert not stripe.default_http_client
stripe.api_requestor.APIRequestor(client=http_client)
# default_http_client is not populated if a client is provided
assert not stripe.default_http_client
stripe.api_requestor.APIRequestor()
# default_http_client is set when no client is specified
assert stripe.default_http_client
new_default_client = stripe.default_http_client
stripe.api_requestor.APIRequestor()
# the newly created client is reused
assert stripe.default_http_client == new_default_client
def test_uses_app_info(self, requestor, mock_response, check_call):
try:
old = stripe.app_info
stripe.set_app_info(
"MyAwesomePlugin",
url="https://myawesomeplugin.info",
version="1.2.34",
partner_id="partner_12345",
)
mock_response("{}", 200)
requestor.request("get", self.valid_path, {})
ua = "Stripe/v1 PythonBindings/%s" % (stripe.version.VERSION,)
ua += " MyAwesomePlugin/1.2.34 (https://myawesomeplugin.info)"
header_matcher = APIHeaderMatcher(
user_agent=ua,
app_info={
"name": "MyAwesomePlugin",
"url": "https://myawesomeplugin.info",
"version": "1.2.34",
"partner_id": "partner_12345",
},
)
check_call("get", headers=header_matcher)
finally:
stripe.app_info = old
def test_uses_given_idempotency_key(
self, requestor, mock_response, check_call
):
mock_response("{}", 200)
meth = "post"
requestor.request(
meth, self.valid_path, {}, {"Idempotency-Key": "123abc"}
)
header_matcher = APIHeaderMatcher(
request_method=meth, idempotency_key="123abc"
)
check_call(meth, headers=header_matcher, post_data="")
def test_uuid4_idempotency_key_when_not_given(
self, requestor, mock_response, check_call
):
mock_response("{}", 200)
meth = "post"
requestor.request(meth, self.valid_path, {})
header_matcher = APIHeaderMatcher(
request_method=meth, idempotency_key=AnyUUID4Matcher()
)
check_call(meth, headers=header_matcher, post_data="")
def test_fails_without_api_key(self, requestor):
stripe.api_key = None
with pytest.raises(stripe.error.AuthenticationError):
requestor.request("get", self.valid_path, {})
def test_invalid_request_error_404(self, requestor, mock_response):
mock_response('{"error": {}}', 404)
with pytest.raises(stripe.error.InvalidRequestError):
requestor.request("get", self.valid_path, {})
def test_invalid_request_error_400(self, requestor, mock_response):
mock_response('{"error": {}}', 400)
with pytest.raises(stripe.error.InvalidRequestError):
requestor.request("get", self.valid_path, {})
def test_idempotency_error(self, requestor, mock_response):
mock_response('{"error": {"type": "idempotency_error"}}', 400)
with pytest.raises(stripe.error.IdempotencyError):
requestor.request("get", self.valid_path, {})
def test_authentication_error(self, requestor, mock_response):
mock_response('{"error": {}}', 401)
with pytest.raises(stripe.error.AuthenticationError):
requestor.request("get", self.valid_path, {})
def test_permissions_error(self, requestor, mock_response):
mock_response('{"error": {}}', 403)
with pytest.raises(stripe.error.PermissionError):
requestor.request("get", self.valid_path, {})
def test_card_error(self, requestor, mock_response):
mock_response('{"error": {"code": "invalid_expiry_year"}}', 402)
with pytest.raises(stripe.error.CardError) as excinfo:
requestor.request("get", self.valid_path, {})
assert excinfo.value.code == "invalid_expiry_year"
def test_rate_limit_error(self, requestor, mock_response):
mock_response('{"error": {}}', 429)
with pytest.raises(stripe.error.RateLimitError):
requestor.request("get", self.valid_path, {})
def test_old_rate_limit_error(self, requestor, mock_response):
"""
Tests legacy rate limit error pre-2015-09-18
"""
mock_response('{"error": {"code":"rate_limit"}}', 400)
with pytest.raises(stripe.error.RateLimitError):
requestor.request("get", self.valid_path, {})
def test_server_error(self, requestor, mock_response):
mock_response('{"error": {}}', 500)
with pytest.raises(stripe.error.APIError):
requestor.request("get", self.valid_path, {})
def test_invalid_json(self, requestor, mock_response):
mock_response("{", 200)
with pytest.raises(stripe.error.APIError):
requestor.request("get", self.valid_path, {})
def test_invalid_method(self, requestor):
with pytest.raises(stripe.error.APIConnectionError):
requestor.request("foo", "bar")
def test_oauth_invalid_requestor_error(self, requestor, mock_response):
mock_response('{"error": "invalid_request"}', 400)
with pytest.raises(stripe.oauth_error.InvalidRequestError):
requestor.request("get", self.valid_path, {})
def test_invalid_client_error(self, requestor, mock_response):
mock_response('{"error": "invalid_client"}', 401)
with pytest.raises(stripe.oauth_error.InvalidClientError):
requestor.request("get", self.valid_path, {})
def test_invalid_grant_error(self, requestor, mock_response):
mock_response('{"error": "invalid_grant"}', 400)
with pytest.raises(stripe.oauth_error.InvalidGrantError):
requestor.request("get", self.valid_path, {})
def test_extract_error_from_stream_request_for_bytes(
self, requestor, mock_streaming_response
):
mock_streaming_response(
util.io.BytesIO(b'{"error": "invalid_grant"}'), 400
)
with pytest.raises(stripe.oauth_error.InvalidGrantError):
requestor.request_stream("get", self.valid_path, {})
def test_extract_error_from_stream_request_for_response(
self, requestor, mock_streaming_response
):
        # Responses don't have getvalue; they only have a read method.
mock_streaming_response(
urllib3.response.HTTPResponse(
body=util.io.BytesIO(b'{"error": "invalid_grant"}'),
preload_content=False,
),
400,
)
with pytest.raises(stripe.oauth_error.InvalidGrantError):
requestor.request_stream("get", self.valid_path, {})
def test_raw_request_with_file_param(self, requestor, mock_response):
test_file = tempfile.NamedTemporaryFile()
test_file.write("\u263A".encode("utf-16"))
test_file.seek(0)
params = {"file": test_file, "purpose": "dispute_evidence"}
supplied_headers = {"Content-Type": "multipart/form-data"}
mock_response("{}", 200)
requestor.request("post", "/v1/files", params, supplied_headers)
class TestDefaultClient(object):
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {
"api_key": stripe.api_key,
"default_http_client": stripe.default_http_client,
}
stripe.api_key = "sk_test_123"
yield
stripe.api_key = orig_attrs["api_key"]
stripe.default_http_client = orig_attrs["default_http_client"]
def test_default_http_client_called(self, mocker):
hc = mocker.Mock(stripe.http_client.HTTPClient)
hc._verify_ssl_certs = True
hc.name = "mockclient"
hc.request_with_retries = mocker.Mock(return_value=("{}", 200, {}))
stripe.default_http_client = hc
stripe.Charge.list(limit=3)
hc.request_with_retries.assert_called_with(
"get",
"https://api.stripe.com/v1/charges?limit=3",
mocker.ANY,
None,
)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/test_api_requestor.py",
"copies": "1",
"size": "25332",
"license": "mit",
"hash": -7947236868041611000,
"line_mean": 31.7286821705,
"line_max": 78,
"alpha_frac": 0.5481999053,
"autogenerated": false,
"ratio": 3.832375189107413,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48805750944074133,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
import json
import appr.semver
from appr.exception import (PackageAlreadyExists, PackageNotFound, ResourceNotFound,
raise_channel_not_found, raise_package_not_found)
DEFAULT_LOCK_TIMEOUT = 3
class ModelsIndexBase(object):
packages_key = "packages.json"
def __init__(self, package=None):
self._packages = None
self._releases = None
self.package = package
self.locks = set()
@property
def releases_key(self):
return self.package + "/" + "releases.json"
@property
def releases_data(self):
path = self.releases_key
if self._releases is None:
try:
self._releases = self._fetch_data(path)
except ResourceNotFound:
raise_package_not_found(self.package)
return self._releases
def blob_key(self, digest, mod="sha256"):
return "%s/digests/%s/%s" % (self.package, mod, digest)
def add_blob(self, b64blob, digest):
try:
path = self.blob_key(digest)
self.get_lock(path)
self._write_raw_data(path, b64blob)
return True
finally:
self.release_lock(path)
def delete_blob(self, digest):
try:
path = self.blob_key(digest)
self.get_lock(path)
self._delete_data(path)
return True
finally:
self.release_lock(path)
def get_blob(self, digest):
try:
path = self.blob_key(digest)
return self._fetch_raw_data(path)
except ResourceNotFound:
raise_package_not_found(self.package, digest)
def add_package(self, package_name):
try:
self.get_lock(self.packages_key)
namespace, name = package_name.split("/")
if namespace not in self.packages_data['packages']:
self.packages_data['packages'][namespace] = {}
if name not in self.packages_data['packages'][namespace]:
pdata = {
"created_at": datetime.datetime.utcnow().isoformat(),
'name': name,
'namespace': namespace}
self.packages_data['packages'][namespace][name] = pdata
self._write_data(self.packages_key, self.packages_data)
finally:
self.release_lock(self.packages_key)
def delete_package(self, package_name):
try:
self.get_lock(self.packages_key)
namespace, name = package_name.split("/")
if (namespace not in self.packages_data['packages'] or
name not in self.packages_data['packages'][namespace]):
return None
pdata = self.packages_data['packages'][namespace].pop(name)
if not self.packages_data['packages'][namespace]:
self.packages_data['packages'].pop(namespace)
self._write_data(self.packages_key, self.packages_data)
return pdata
finally:
self.release_lock(self.packages_key)
def add_release(self, package_data, release, media_type, force=False):
try:
self.get_lock(self.releases_key)
try:
data = self.releases_data
except PackageNotFound:
data = {'page': 0, 'channels': {}, 'releases': {}}
if release not in data['releases']:
data['releases'][release] = {'manifests': {}, 'channels': []}
if (release in data['releases'] and
media_type in data['releases'][release]['manifests'] and not force):
raise PackageAlreadyExists("Package exists already", {
"package": self.package,
"release": release,
"media_type": media_type})
data['releases'][release]['manifests'][media_type] = package_data
self._write_data(self.releases_key, data)
self.add_package(self.package)
return data
finally:
self.release_lock(self.releases_key)
def delete_release(self, release, media_type):
try:
self.get_lock(self.releases_key)
data = self.releases_data
if release not in data['releases'] or media_type not in data['releases'][release][
'manifests']:
raise_package_not_found(self.package)
data['releases'][release]['manifests'].pop(media_type)
if not data['releases'][release]['manifests']:
data['releases'].pop(release)
if not data['releases']:
self.delete_package(self.package)
self._write_data(self.releases_key, data)
return True
finally:
self.release_lock(self.releases_key)
@property
def packages_data(self):
if self._packages is None:
try:
self._packages = self._fetch_data(self.packages_key)
except ResourceNotFound:
try:
self.get_lock(self.packages_key, timeout=None)
self._packages = {"page": 0, "packages": {}}
self._write_data(self.packages_key, self._packages)
finally:
self.release_lock(self.packages_key)
return self._packages
def releases(self, media_type=None):
if media_type is not None:
result = []
for release_name, release in self.releases_data['releases'].iteritems():
if media_type in release['manifests']:
result.append(release_name)
else:
result = self.releases_data['releases'].keys()
return result
def release_manifests(self, release):
try:
manifests = self.releases_data['releases'][release]['manifests']
return manifests
except KeyError:
raise_package_not_found(self.package, release)
def release_formats(self, release=None):
if release:
return self.release_manifests(release).keys()
else:
formats = set()
for _, release in self.releases_data['releases'].iteritems():
[formats.add(x) for x in release['manifests'].keys()]
return list(formats)
def release(self, release, media_type):
try:
return self.release_manifests(release)[media_type]
except KeyError:
raise_package_not_found(self.package, release, media_type)
def ispackage_exists(self):
return (len(self.releases()) > 0)
def channels(self):
data = self.releases_data['channels']
if data:
return data.values()
else:
return []
def channel(self, channel):
try:
return self.releases_data['channels'][channel]
except KeyError:
raise_channel_not_found(channel)
def _set_channel(self, channel, release):
try:
self.get_lock(self.releases_key)
data = self.releases_data
data['channels'][channel] = {
'name': channel,
'current': release,
'package': self.package}
if channel not in data['releases'][release]['channels']:
data['releases'][release]['channels'].append(channel)
self._write_data(self.releases_key, data)
return True
finally:
self.release_lock(self.releases_key)
def add_channel(self, channel, current):
return self._set_channel(channel, current)
def delete_channel(self, channel):
""" Delete the channel from all releases """
if not self.ischannel_exists(channel):
raise_channel_not_found(channel)
try:
self.get_lock(self.releases_key)
data = self.releases_data
for release in self.channel_releases(channel):
self._releases = self._delete_channel_release(channel, release)
if channel in data['channels']:
data['channels'].pop(channel)
self._write_data(self.releases_key, data)
finally:
self.release_lock(self.releases_key)
def set_channel_default(self, channel, release):
self._check_channel_release(channel, release)
return self._set_channel(channel, release)
def _check_channel_release(self, channel, release):
if not self.ischannel_exists(channel):
raise_channel_not_found(channel)
if release not in self.releases_data['releases']:
raise_package_not_found(self.package, release)
def add_channel_release(self, channel, release):
self._check_channel_release(channel, release)
try:
self.get_lock(self.releases_key)
data = self.releases_data
if channel not in data['releases'][release]['channels']:
data['releases'][release]['channels'].append(channel)
self._write_data(self.releases_key, data)
return True
finally:
self.release_lock(self.releases_key)
def delete_channel_release(self, channel, release):
self._check_channel_release(channel, release)
try:
self.get_lock(self.releases_key)
data = self._delete_channel_release(channel, release)
releases = self.channel_releases(channel)
if not releases:
data['channels'].pop(channel)
else:
self.set_channel_default(channel, releases[0])
self._write_data(self.releases_key, data)
return True
finally:
self.release_lock(self.releases_key)
def _delete_channel_release(self, channel, release):
data = self.releases_data
channels = set(data['releases'][release]['channels'])
if channel in channels:
channels.discard(channel)
data['releases'][release]['channels'] = list(channels)
return data
def channel_releases(self, channel):
if not self.ischannel_exists(channel):
raise_channel_not_found(self.package, channel)
releases = [
release for release, x in self.releases_data['releases'].iteritems()
if channel in x['channels']]
ordered_releases = [
str(x) for x in sorted(appr.semver.versions(releases, False), reverse=True)]
return ordered_releases
def release_channels(self, release):
if release not in self.releases_data['releases']:
raise_package_not_found(self.package, release)
return self.releases_data['releases'][release]['channels']
def package_names(self, namespace=None):
result = []
if namespace is not None:
if namespace in self.packages_data['packages']:
result = [
"%s/%s" % (namespace, name)
for name in self.packages_data['packages'][namespace].keys()]
else:
for namespace, packages in self.packages_data['packages'].iteritems():
for name in packages.keys():
result.append("%s/%s" % (namespace, name))
return result
def ischannel_exists(self, channel):
return channel in self.releases_data['channels']
def packages(self, namespace=None):
result = []
if namespace is not None:
if namespace in self.packages_data['packages']:
result = self.packages_data['packages'][namespace].values()
else:
for namespace, packages in self.packages_data['packages'].iteritems():
for _, data in packages.iteritems():
result.append(data)
return result
def _lock_key(self, key):
return "%s.lock" % (key)
def get_lock(self, key, ttl=3, timeout=DEFAULT_LOCK_TIMEOUT):
lock_key = self._lock_key(key)
if lock_key not in self.locks:
self._get_lock(lock_key, ttl, timeout)
self.locks.add(lock_key)
def release_lock(self, key):
""" Check if owner of the lock """
lock_key = self._lock_key(key)
if lock_key in self.locks:
self.locks.discard(lock_key)
self._release_lock(lock_key)
def _get_lock(self, key, ttl=3, timeout=DEFAULT_LOCK_TIMEOUT):
raise NotImplementedError
def _release_lock(self, key):
""" Remove the lock """
raise NotImplementedError
def _fetch_data(self, key):
return json.loads(self._fetch_raw_data(key))
def _fetch_raw_data(self, key):
raise NotImplementedError
def _write_data(self, key, data):
return self._write_raw_data(key, json.dumps(data))
def _write_raw_data(self, key, data):
raise NotImplementedError
def _delete_data(self, key):
raise NotImplementedError
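# The sketch below is illustrative only and not part of appr: it shows how a
# concrete backend plugs into ModelsIndexBase by filling in the abstract
# storage and locking hooks. All names here are invented for the example and
# the ResourceNotFound constructor arguments are assumed.
class _InMemoryModelsIndex(ModelsIndexBase):
    _store = {}    # key -> raw string payload, shared across instances
    _held = set()  # lock keys currently held
    def _get_lock(self, key, ttl=3, timeout=DEFAULT_LOCK_TIMEOUT):
        # A real backend would retry until `timeout`; this sketch fails fast.
        if key in self._held:
            raise RuntimeError("lock already held: %s" % key)
        self._held.add(key)
    def _release_lock(self, key):
        self._held.discard(key)
    def _fetch_raw_data(self, key):
        try:
            return self._store[key]
        except KeyError:
            raise ResourceNotFound("not found", {"key": key})
    def _write_raw_data(self, key, data):
        self._store[key] = data
    def _delete_data(self, key):
        self._store.pop(key, None)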
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/kv/models_index_base.py",
"copies": "2",
"size": "13077",
"license": "apache-2.0",
"hash": -3535560254784361000,
"line_mean": 35.8366197183,
"line_max": 94,
"alpha_frac": 0.5705437027,
"autogenerated": false,
"ratio": 4.345962113659023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5916505816359023,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
import os
import platform
import sys
import uuid
import copy
import numpy
import xarray
from pandas import to_datetime
import datacube
from ..model import GeoPolygon, CRS, Dataset
import yaml
try:
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeDumper
def machine_info():
info = {
'software_versions': {
'python': {'version': sys.version},
'datacube': {'version': datacube.__version__, 'repo_url': 'https://github.com/data-cube/agdc-v2.git'},
},
'hostname': platform.node(),
}
if hasattr(os, 'uname'):
info['uname'] = ' '.join(os.uname())
else:
info['uname'] = ' '.join([platform.system(),
platform.node(),
platform.release(),
platform.version(),
platform.machine()])
return {'lineage': {'machine': info}}
def geobox_info(extent, valid_data=None):
image_bounds = extent.boundingbox
data_bounds = valid_data.boundingbox if valid_data else image_bounds
gp = GeoPolygon([(data_bounds.left, data_bounds.top),
(data_bounds.right, data_bounds.top),
(data_bounds.right, data_bounds.bottom),
(data_bounds.left, data_bounds.bottom)],
extent.crs).to_crs(CRS('EPSG:4326'))
doc = {
'extent': {
'coord': {
'ul': {'lon': gp.points[0][0], 'lat': gp.points[0][1]},
'ur': {'lon': gp.points[1][0], 'lat': gp.points[1][1]},
'lr': {'lon': gp.points[2][0], 'lat': gp.points[2][1]},
'll': {'lon': gp.points[3][0], 'lat': gp.points[3][1]},
}
},
'grid_spatial': {
'projection': {
'spatial_reference': str(extent.crs),
'geo_ref_points': {
'ul': {'x': image_bounds.left, 'y': image_bounds.top},
'ur': {'x': image_bounds.right, 'y': image_bounds.top},
'll': {'x': image_bounds.left, 'y': image_bounds.bottom},
'lr': {'x': image_bounds.right, 'y': image_bounds.bottom},
}
}
}
}
if valid_data:
doc['grid_spatial']['projection']['valid_data'] = {
'type': 'Polygon',
'coordinates': [valid_data.points+[copy.copy(valid_data.points[0])]] # HACK: to disable yaml aliases
}
return doc
def new_dataset_info():
return {
'id': str(uuid.uuid4()),
'creation_dt': datetime.datetime.utcnow().isoformat(),
}
def band_info(band_names):
return {
'image': {
'bands': {name: {'path': '', 'layer': name} for name in band_names}
}
}
def time_info(time):
time_str = to_datetime(time).isoformat()
return {
'extent': {
'from_dt': time_str,
'to_dt': time_str,
'center_dt': time_str,
}
}
def source_info(source_datasets):
return {
'lineage': {
'source_datasets': {str(idx): dataset.metadata_doc for idx, dataset in enumerate(source_datasets)}
}
}
def datasets_to_doc(output_datasets):
"""
Create a yaml document version of every dataset
:param output_datasets: An array of :class:`datacube.model.Dataset`
:type output_datasets: :py:class:`xarray.DataArray`
:return: An array of yaml document strings
:rtype: :py:class:`xarray.DataArray`
"""
def dataset_to_yaml(index, dataset):
return yaml.dump(dataset.metadata_doc, Dumper=SafeDumper, encoding='utf-8')
return xr_apply(output_datasets, dataset_to_yaml, dtype='O').astype('S')
def xr_iter(data_array):
"""
Iterate over every element in an xarray, returning::
* the numerical index eg ``(10, 1)``
* the labeled index eg ``{'time': datetime(), 'band': 'red'}``
* the element (same as ``da[10, 1].item()``)
:param data_array: Array to iterate over
:type data_array: xarray.DataArray
:return: i-index, label-index, value of da element
    :rtype: tuple, dict, da.dtype
"""
values = data_array.values
coords = {coord_name: v.values for coord_name, v in data_array.coords.items()}
for i in numpy.ndindex(data_array.shape):
entry = values[i]
index = {coord_name: v[i] for coord_name, v in coords.items()}
yield i, index, entry
def xr_apply(data_array, func, dtype):
"""
Apply a function to every element of a :class:`xarray.DataArray`
:type data_array: xarray.DataArray
:param func: function that takes a dict of labels and an element of the array,
and returns a value of the given dtype
:param dtype: The dtype of the returned array
:return: The array with output of the function for every element.
:rtype: xarray.DataArray
"""
data = numpy.empty(shape=data_array.shape, dtype=dtype)
for i, index, entry in xr_iter(data_array):
v = func(index, entry)
data[i] = v
return xarray.DataArray(data, coords=data_array.coords, dims=data_array.dims)
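# Illustrative usage sketch (not part of datacube): doubling every element of
# a small two-dimensional DataArray with xr_apply.
# >>> da = xarray.DataArray(numpy.arange(4).reshape(2, 2), dims=('x', 'y'))
# >>> xr_apply(da, lambda labels, value: value * 2, dtype='int64').values
# array([[0, 2],
#        [4, 6]])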
def make_dataset(dataset_type, sources, extent, center_time, valid_data=None, uri=None, app_info=None):
"""
Create Dataset for the data
:param DatasetType dataset_type:
    :param sources: source datasets used to produce this dataset
:type sources: list[:class:`Dataset`]
:param GeoPolygon extent: extent of the dataset
:param GeoPolygon valid_data: extent of the valid data
:param center_time: time of the central point of the dataset
:param str uri: The uri of the dataset
:param dict app_info: Additional metadata to be stored about the generation of the product
:rtype: class:`Dataset`
"""
document = {}
merge(document, dataset_type.metadata_doc)
merge(document, new_dataset_info())
merge(document, machine_info())
merge(document, band_info(dataset_type.measurements.keys()))
merge(document, source_info(sources))
merge(document, geobox_info(extent, valid_data))
merge(document, time_info(center_time))
merge(document, app_info or {})
return Dataset(dataset_type,
document,
local_uri=uri,
sources={str(idx): dataset for idx, dataset in enumerate(sources)})
def merge(a, b, path=None):
"""merges b into a
http://stackoverflow.com/a/7205107/5262498
:type a: dict
:type b: dict
:rtype: dict
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
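# Illustrative usage sketch: merge() folds the second dict into the first,
# recursing through nested dicts and raising on conflicting leaf values.
# >>> merge({'lineage': {'machine': {'hostname': 'a'}}},
# ...       {'lineage': {'source_datasets': {}}})
# {'lineage': {'machine': {'hostname': 'a'}, 'source_datasets': {}}}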
| {
"repo_name": "ceos-seo/Data_Cube_v2",
"path": "agdc-v2/datacube/model/utils.py",
"copies": "1",
"size": "7075",
"license": "apache-2.0",
"hash": -638702947836383100,
"line_mean": 31.1590909091,
"line_max": 114,
"alpha_frac": 0.5725795053,
"autogenerated": false,
"ratio": 3.7374537770734286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4810033282373428,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datetime
import warnings
from functools import wraps
import pandas as pd
from pandas.core.window import Rolling as pd_Rolling
from ..base import tokenize
from ..utils import M, funcname, derived_from
from .core import _emulate
from .utils import make_meta
def overlap_chunk(func, prev_part, current_part, next_part, before, after,
args, kwargs):
msg = ("Partition size is less than overlapping "
"window size. Try using ``df.repartition`` "
"to increase the partition size.")
if prev_part is not None and isinstance(before, int):
if prev_part.shape[0] != before:
raise NotImplementedError(msg)
if next_part is not None and isinstance(after, int):
if next_part.shape[0] != after:
raise NotImplementedError(msg)
    # We validate that the window isn't too large for timedeltas in map_overlap
parts = [p for p in (prev_part, current_part, next_part) if p is not None]
combined = pd.concat(parts)
out = func(combined, *args, **kwargs)
if prev_part is None:
before = None
if isinstance(before, datetime.timedelta):
before = len(prev_part)
if next_part is None:
return out.iloc[before:]
if isinstance(after, datetime.timedelta):
after = len(next_part)
return out.iloc[before:-after]
def map_overlap(func, df, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
Parameters
----------
func : function
Function applied to each partition.
df : dd.DataFrame, dd.Series
before : int or timedelta
The rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int or timedelta
The rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
See Also
--------
dd.DataFrame.map_overlap
"""
if (isinstance(before, datetime.timedelta) or isinstance(after, datetime.timedelta)):
if not df.index._meta_nonempty.is_all_dates:
raise TypeError("Must have a `DatetimeIndex` when using string offset "
"for `before` and `after`")
else:
if not (isinstance(before, int) and before >= 0 and
isinstance(after, int) and after >= 0):
raise ValueError("before and after must be positive integers")
if 'token' in kwargs:
func_name = kwargs.pop('token')
token = tokenize(df, before, after, *args, **kwargs)
else:
func_name = 'overlap-' + funcname(func)
token = tokenize(func, df, before, after, *args, **kwargs)
if 'meta' in kwargs:
meta = kwargs.pop('meta')
else:
meta = _emulate(func, df, *args, **kwargs)
meta = make_meta(meta)
name = '{0}-{1}'.format(func_name, token)
name_a = 'overlap-prepend-' + tokenize(df, before)
name_b = 'overlap-append-' + tokenize(df, after)
df_name = df._name
dsk = df.dask.copy()
# Have to do the checks for too large windows in the time-delta case
    # here instead of in `overlap_chunk`, since we can't rely on a
    # fixed-frequency index
timedelta_partition_message = (
"Partition size is less than specified window. "
"Try using ``df.repartition`` to increase the partition size"
)
if before and isinstance(before, int):
dsk.update({(name_a, i): (M.tail, (df_name, i), before)
for i in range(df.npartitions - 1)})
prevs = [None] + [(name_a, i) for i in range(df.npartitions - 1)]
elif isinstance(before, datetime.timedelta):
# Assumes monotonic (increasing?) index
deltas = pd.Series(df.divisions).diff().iloc[1:-1]
if (before > deltas).any():
raise ValueError(timedelta_partition_message)
dsk.update({(name_a, i): (_tail_timedelta, (df_name, i), (df_name, i + 1), before)
for i in range(df.npartitions - 1)})
prevs = [None] + [(name_a, i) for i in range(df.npartitions - 1)]
else:
prevs = [None] * df.npartitions
if after and isinstance(after, int):
dsk.update({(name_b, i): (M.head, (df_name, i), after)
for i in range(1, df.npartitions)})
nexts = [(name_b, i) for i in range(1, df.npartitions)] + [None]
elif isinstance(after, datetime.timedelta):
# TODO: Do we have a use-case for this? Pandas doesn't allow negative rolling windows
deltas = pd.Series(df.divisions).diff().iloc[1:-1]
if (after > deltas).any():
raise ValueError(timedelta_partition_message)
dsk.update({(name_b, i): (_head_timedelta, (df_name, i - 0), (df_name, i), after)
for i in range(1, df.npartitions)})
nexts = [(name_b, i) for i in range(1, df.npartitions)] + [None]
else:
nexts = [None] * df.npartitions
for i, (prev, current, next) in enumerate(zip(prevs, df._keys(), nexts)):
dsk[(name, i)] = (overlap_chunk, func, prev, current, next, before,
after, args, kwargs)
return df._constructor(dsk, name, meta, df.divisions)
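# Illustrative usage sketch (assumes a dask installation exposing this module
# as dask.dataframe.rolling): share the two trailing rows of each partition
# with its successor so a 3-period rolling sum has no boundary artifacts.
# >>> import pandas as pd
# >>> import dask.dataframe as dd
# >>> pdf = pd.DataFrame({'x': range(10)})
# >>> ddf = dd.from_pandas(pdf, npartitions=3)
# >>> map_overlap(lambda part: part.rolling(3).sum(), ddf, 2, 0).compute()
# matches pdf.rolling(3).sum()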
def wrap_rolling(func, method_name):
"""Create a chunked version of a pandas.rolling_* function"""
@wraps(func)
def rolling(arg, window, *args, **kwargs):
# pd.rolling_* functions are deprecated
warnings.warn(("DeprecationWarning: dd.rolling_{0} is deprecated and "
"will be removed in a future version, replace with "
"df.rolling(...).{0}(...)").format(method_name))
rolling_kwargs = {}
method_kwargs = {}
for k, v in kwargs.items():
if k in {'min_periods', 'center', 'win_type', 'axis', 'freq'}:
rolling_kwargs[k] = v
else:
method_kwargs[k] = v
rolling = arg.rolling(window, **rolling_kwargs)
return getattr(rolling, method_name)(*args, **method_kwargs)
return rolling
def _head_timedelta(current, next_, after):
"""Return rows of ``next_`` whose index is before the last
observation in ``current`` + ``after``.
Parameters
----------
current : DataFrame
next_ : DataFrame
after : timedelta
Returns
-------
overlapped : DataFrame
"""
return next_[next_.index < (current.index.max() + after)]
def _tail_timedelta(prev, current, before):
"""Return rows of ``prev`` whose index is after the first
observation in ``current`` - ``before``.
Parameters
----------
    prev : DataFrame
    current : DataFrame
before : timedelta
Returns
-------
overlapped : DataFrame
"""
return prev[prev.index > (current.index.min() - before)]
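# Worked example (illustrative): with before = pd.Timedelta('2min'), a
# ``current`` partition starting at 10:05 and a ``prev`` partition indexed up
# to 10:04, _tail_timedelta keeps only the rows of ``prev`` stamped after
# 10:03, i.e. the trailing two minutes needed as context by the next partition.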
rolling_count = wrap_rolling(pd.rolling_count, 'count')
rolling_sum = wrap_rolling(pd.rolling_sum, 'sum')
rolling_mean = wrap_rolling(pd.rolling_mean, 'mean')
rolling_median = wrap_rolling(pd.rolling_median, 'median')
rolling_min = wrap_rolling(pd.rolling_min, 'min')
rolling_max = wrap_rolling(pd.rolling_max, 'max')
rolling_std = wrap_rolling(pd.rolling_std, 'std')
rolling_var = wrap_rolling(pd.rolling_var, 'var')
rolling_skew = wrap_rolling(pd.rolling_skew, 'skew')
rolling_kurt = wrap_rolling(pd.rolling_kurt, 'kurt')
rolling_quantile = wrap_rolling(pd.rolling_quantile, 'quantile')
rolling_apply = wrap_rolling(pd.rolling_apply, 'apply')
@wraps(pd.rolling_window)
def rolling_window(arg, window, **kwargs):
if kwargs.pop('mean', True):
return rolling_mean(arg, window, **kwargs)
return rolling_sum(arg, window, **kwargs)
def pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):
rolling = df.rolling(**rolling_kwargs)
return getattr(rolling, name)(*args, **kwargs)
class Rolling(object):
"""Provides rolling window calculations."""
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0):
if freq is not None:
msg = 'The deprecated freq argument is not supported.'
raise NotImplementedError(msg)
self.obj = obj # dataframe or series
self.window = window
self.min_periods = min_periods
self.center = center
self.axis = axis
self.win_type = win_type
# Allow pandas to raise if appropriate
pd_roll = obj._meta.rolling(**self._rolling_kwargs())
# Using .rolling(window='2s'), pandas will convert the
# offset str to a window in nanoseconds. But pandas doesn't
# accept the integer window with win_type='freq', so we store
# that information here.
# See https://github.com/pandas-dev/pandas/issues/15969
self._window = pd_roll.window
self._win_type = pd_roll.win_type
self._min_periods = pd_roll.min_periods
def _rolling_kwargs(self):
return {'window': self.window,
'min_periods': self.min_periods,
'center': self.center,
'win_type': self.win_type,
'axis': self.axis}
@property
def _has_single_partition(self):
"""
Indicator for whether the object has a single partition (True)
or multiple (False).
"""
return (self.axis in (1, 'columns') or
(isinstance(self.window, int) and self.window <= 1) or
self.obj.npartitions == 1)
def _call_method(self, method_name, *args, **kwargs):
rolling_kwargs = self._rolling_kwargs()
meta = pandas_rolling_method(self.obj._meta_nonempty, rolling_kwargs,
method_name, *args, **kwargs)
if self._has_single_partition:
            # There's no overlap; just use map_partitions
return self.obj.map_partitions(pandas_rolling_method,
rolling_kwargs, method_name,
*args, token=method_name, meta=meta,
**kwargs)
# Convert window to overlap
if self.center:
before = self.window // 2
after = self.window - before - 1
elif self._win_type == 'freq':
before = pd.Timedelta(self.window)
after = 0
else:
before = self.window - 1
after = 0
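        # Worked example: window=5 with center=True yields before=2, after=2,
        # i.e. two rows of context on each side of every partition boundary.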
return map_overlap(pandas_rolling_method, self.obj, before, after,
rolling_kwargs, method_name, *args,
token=method_name, meta=meta, **kwargs)
@derived_from(pd_Rolling)
def count(self):
return self._call_method('count')
@derived_from(pd_Rolling)
def sum(self):
return self._call_method('sum')
@derived_from(pd_Rolling)
def mean(self):
return self._call_method('mean')
@derived_from(pd_Rolling)
def median(self):
return self._call_method('median')
@derived_from(pd_Rolling)
def min(self):
return self._call_method('min')
@derived_from(pd_Rolling)
def max(self):
return self._call_method('max')
@derived_from(pd_Rolling)
def std(self, ddof=1):
        return self._call_method('std', ddof=ddof)
@derived_from(pd_Rolling)
def var(self, ddof=1):
        return self._call_method('var', ddof=ddof)
@derived_from(pd_Rolling)
def skew(self):
return self._call_method('skew')
@derived_from(pd_Rolling)
def kurt(self):
return self._call_method('kurt')
@derived_from(pd_Rolling)
def quantile(self, quantile):
return self._call_method('quantile', quantile)
@derived_from(pd_Rolling)
def apply(self, func, args=(), kwargs={}):
return self._call_method('apply', func, args=args, kwargs=kwargs)
def __repr__(self):
def order(item):
k, v = item
_order = {'window': 0, 'min_periods': 1, 'center': 2,
'win_type': 3, 'axis': 4}
return _order[k]
rolling_kwargs = self._rolling_kwargs()
# pandas translates the '2S' offset to nanoseconds
rolling_kwargs['window'] = self._window
rolling_kwargs['win_type'] = self._win_type
return 'Rolling [{}]'.format(','.join(
'{}={}'.format(k, v)
for k, v in sorted(rolling_kwargs.items(), key=order)
if v is not None))
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/rolling.py",
"copies": "2",
"size": "12635",
"license": "bsd-3-clause",
"hash": 5927844971846327000,
"line_mean": 34.3921568627,
"line_max": 93,
"alpha_frac": 0.5921646221,
"autogenerated": false,
"ratio": 3.790879087908791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019033449562900576,
"num_lines": 357
} |
from __future__ import absolute_import, division, print_function
import datetime
import numpy as np
from pandas import DataFrame, Series
from datashape import to_numpy, to_numpy_dtype
from numbers import Number
from ..expr import (
Reduction, Field, Projection, Broadcast, Selection, ndim,
Distinct, Sort, Tail, Head, Label, ReLabel, Expr, Slice, Join,
std, var, count, nunique, Summary, IsIn,
BinOp, UnaryOp, USub, Not, nelements, Repeat, Concat, Interp,
UTCFromTimestamp, DateTimeTruncate,
Transpose, TensorDot, Coerce, isnan,
greatest, least, BinaryMath, atan2,
)
from ..utils import keywords
from .core import base, compute
from ..dispatch import dispatch
from odo import into
import pandas as pd
__all__ = ['np']
@dispatch(Field, np.ndarray)
def compute_up(c, x, **kwargs):
if x.dtype.names and c._name in x.dtype.names:
return x[c._name]
if not x.dtype.names and x.shape[1] == len(c._child.fields):
return x[:, c._child.fields.index(c._name)]
raise NotImplementedError() # pragma: no cover
@dispatch(Projection, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names and all(col in x.dtype.names for col in t.fields):
return x[t.fields]
if not x.dtype.names and x.shape[1] == len(t._child.fields):
return x[:, [t._child.fields.index(col) for col in t.fields]]
raise NotImplementedError() # pragma: no cover
try:
from .numba import broadcast_numba as broadcast_ndarray
except ImportError:
def broadcast_ndarray(t, *data, **kwargs):
del kwargs['scope']
d = dict(zip(t._scalar_expr._leaves(), data))
return compute(t._scalar_expr, d, **kwargs)
compute_up.register(Broadcast, np.ndarray)(broadcast_ndarray)
for i in range(2, 6):
compute_up.register(Broadcast, *([(np.ndarray, Number)] * i))(broadcast_ndarray)
@dispatch(Repeat, np.ndarray)
def compute_up(t, data, _char_mul=np.char.multiply, **kwargs):
if isinstance(t.lhs, Expr):
return _char_mul(data, t.rhs)
else:
return _char_mul(t.lhs, data)
@compute_up.register(Repeat, np.ndarray, (np.ndarray, base))
@compute_up.register(Repeat, base, np.ndarray)
def compute_up_np_repeat(t, lhs, rhs, _char_mul=np.char.multiply, **kwargs):
return _char_mul(lhs, rhs)
def _interp(arr, v, _Series=pd.Series, _charmod=np.char.mod):
"""
Delegate to the most efficient string formatting technique based on
the length of the array.
"""
if len(arr) >= 145:
return _Series(arr) % v
return _charmod(arr, v)
@dispatch(Interp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return _interp(data, t.rhs)
else:
return _interp(t.lhs, data)
@compute_up.register(Interp, np.ndarray, (np.ndarray, base))
@compute_up.register(Interp, base, np.ndarray)
def compute_up_np_interp(t, lhs, rhs, **kwargs):
return _interp(lhs, rhs)
@compute_up.register(greatest, np.ndarray, (np.ndarray, base))
@compute_up.register(greatest, base, np.ndarray)
def compute_up_greatest(expr, lhs, rhs, **kwargs):
return np.maximum(lhs, rhs)
@compute_up.register(least, np.ndarray, (np.ndarray, base))
@compute_up.register(least, base, np.ndarray)
def compute_up_least(expr, lhs, rhs, **kwargs):
return np.minimum(lhs, rhs)
@dispatch(BinOp, np.ndarray, (np.ndarray, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, base, np.ndarray)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@compute_up.register(BinaryMath, np.ndarray, (np.ndarray, base))
@compute_up.register(BinaryMath, base, np.ndarray)
def compute_up_binary_math(t, lhs, rhs, **kwargs):
return getattr(np, type(t).__name__)(lhs, rhs)
@dispatch(BinaryMath, np.ndarray)
def compute_up(t, data, **kwargs):
func = getattr(np, type(t).__name__)
if isinstance(t.lhs, Expr):
return func(data, t.rhs)
else:
return func(t.lhs, data)
@compute_up.register(atan2, np.ndarray, (np.ndarray, base))
@compute_up.register(atan2, base, np.ndarray)
def compute_up_binary_math(t, lhs, rhs, **kwargs):
return np.arctan2(lhs, rhs)
@dispatch(atan2, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return np.arctan2(data, t.rhs)
else:
return np.arctan2(t.lhs, data)
@dispatch(UnaryOp, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(np, t.symbol)(x)
@dispatch(Not, np.ndarray)
def compute_up(t, x, **kwargs):
return np.logical_not(x)
@dispatch(USub, np.ndarray)
def compute_up(t, x, **kwargs):
return np.negative(x)
inat = np.datetime64('NaT').view('int64')
@dispatch(count, np.ndarray)
def compute_up(t, x, **kwargs):
result_dtype = to_numpy_dtype(t.dshape)
if issubclass(x.dtype.type, (np.floating, np.object_)):
return pd.notnull(x).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
elif issubclass(x.dtype.type, np.datetime64):
return (x.view('int64') != inat).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
else:
return np.ones(x.shape, dtype=result_dtype).sum(keepdims=t.keepdims,
axis=t.axis,
dtype=result_dtype)
@dispatch(nunique, np.ndarray)
def compute_up(t, x, **kwargs):
assert t.axis == tuple(range(ndim(t._child)))
result = len(np.unique(x))
if t.keepdims:
result = np.array([result])
return result
@dispatch(Reduction, np.ndarray)
def compute_up(t, x, **kwargs):
    # can't use the ndarray methods here, as they aren't Python functions
reducer = getattr(np, t.symbol)
if 'dtype' in keywords(reducer):
return reducer(x, axis=t.axis, keepdims=t.keepdims,
dtype=to_numpy_dtype(t.schema))
return reducer(x, axis=t.axis, keepdims=t.keepdims)
def axify(expr, axis, keepdims=False):
""" inject axis argument into expression
Helper function for compute_up(Summary, np.ndarray)
>>> from blaze import symbol
>>> s = symbol('s', '10 * 10 * int')
>>> expr = s.sum()
>>> axify(expr, axis=0)
sum(s, axis=(0,))
"""
return type(expr)(expr._child, axis=axis, keepdims=keepdims)
@dispatch(Summary, np.ndarray)
def compute_up(expr, data, **kwargs):
shape, dtype = to_numpy(expr.dshape)
if shape:
result = np.empty(shape=shape, dtype=dtype)
for n, v in zip(expr.names, expr.values):
result[n] = compute(axify(v, expr.axis, expr.keepdims), data)
return result
else:
return tuple(compute(axify(v, expr.axis), data) for v in expr.values)
@dispatch((std, var), np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(ddof=t.unbiased, axis=t.axis,
keepdims=t.keepdims)
@compute_up.register(Distinct, np.recarray)
def recarray_distinct(t, rec, **kwargs):
return pd.DataFrame.from_records(rec).drop_duplicates(
subset=t.on or None).to_records(index=False).astype(rec.dtype)
@dispatch(Distinct, np.ndarray)
def compute_up(t, arr, _recarray_distinct=recarray_distinct, **kwargs):
if t.on:
if getattr(arr.dtype, 'names', None) is not None:
return _recarray_distinct(t, arr, **kwargs).view(np.ndarray)
else:
raise ValueError('malformed expression: no columns to distinct on')
return np.unique(arr)
@dispatch(Sort, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names is None: # not a struct array
result = np.sort(x)
elif (t.key in x.dtype.names or # struct array
isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):
result = np.sort(x, order=t.key)
elif t.key:
raise NotImplementedError("Sort key %s not supported" % t.key)
if not t.ascending:
result = result[::-1]
return result
@dispatch(Head, np.ndarray)
def compute_up(t, x, **kwargs):
return x[:t.n]
@dispatch(Tail, np.ndarray)
def compute_up(t, x, **kwargs):
return x[-t.n:]
@dispatch(Label, np.ndarray)
def compute_up(t, x, **kwargs):
return np.array(x, dtype=[(t.label, x.dtype.type)])
@dispatch(ReLabel, np.ndarray)
def compute_up(t, x, **kwargs):
types = [x.dtype[i] for i in range(len(x.dtype))]
return np.array(x, dtype=list(zip(t.fields, types)))
@dispatch(Selection, np.ndarray)
def compute_up(sel, x, **kwargs):
predicate = compute(sel.predicate, {sel._child: x})
cond = getattr(predicate, 'values', predicate)
return x[cond]
@dispatch(Selection, np.ndarray, np.ndarray)
def compute_up(expr, arr, predicate, **kwargs):
return arr[predicate]
@dispatch(Selection, np.ndarray, Series)
def compute_up(expr, arr, predicate, **kwargs):
return arr[predicate.values]
@dispatch(UTCFromTimestamp, np.ndarray)
def compute_up(expr, data, **kwargs):
return (data * 1e6).astype('datetime64[us]')
@dispatch(Slice, np.ndarray)
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch(Expr, np.ndarray)
def compute_up(t, x, **kwargs):
ds = t._child.dshape
if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:
return compute_up(t, into(DataFrame, x, dshape=ds), **kwargs)
else:
return compute_up(t, into(Series, x, dshape=ds), **kwargs)
@dispatch(nelements, np.ndarray)
def compute_up(expr, data, **kwargs):
axis = expr.axis
if expr.keepdims:
shape = tuple(data.shape[i] if i not in axis else 1
for i in range(ndim(expr._child)))
else:
shape = tuple(data.shape[i] for i in range(ndim(expr._child))
if i not in axis)
value = np.prod([data.shape[i] for i in axis])
result = np.empty(shape)
result.fill(value)
result = result.astype('int64')
return result
# Note the use of 'week': 'M8[D]' here.
# We truncate week offsets "manually" in the compute_up implementation by
# first converting to days and then multiplying our measure by 7. This
# simplifies the code: we only need to calculate the week offset relative to
# the day of the week.
precision_map = {'year': 'M8[Y]',
'month': 'M8[M]',
'week': 'M8[D]',
'day': 'M8[D]',
'hour': 'M8[h]',
'minute': 'M8[m]',
'second': 'M8[s]',
'millisecond': 'M8[ms]',
'microsecond': 'M8[us]',
'nanosecond': 'M8[ns]'}
# these offsets are integers in units of their representation
epoch = datetime.datetime(1970, 1, 1)
offsets = {
'week': epoch.isoweekday(),
'day': epoch.toordinal() # number of days since *Python's* epoch (01/01/01)
}
@dispatch(DateTimeTruncate, (np.ndarray, np.datetime64))
def compute_up(expr, data, **kwargs):
np_dtype = precision_map[expr.unit]
offset = offsets.get(expr.unit, 0)
measure = expr.measure * 7 if expr.unit == 'week' else expr.measure
result = (((data.astype(np_dtype)
.view('int64')
+ offset)
// measure
* measure
- offset)
.astype(np_dtype))
return result
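# Worked example (illustrative) for expr.unit == 'week' with measure == 1:
# 1970-01-09 is day 8 in 'M8[D]' units and the week offset is
# epoch.isoweekday() == 4, so ((8 + 4) // 7) * 7 - 4 == 3 and the value
# truncates to day 3, i.e. 1970-01-04.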
@dispatch(isnan, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.isnan(data)
@dispatch(np.ndarray)
def chunks(x, chunksize=1024):
start = 0
n = len(x)
while start < n:
yield x[start:start + chunksize]
start += chunksize
@dispatch(Transpose, np.ndarray)
def compute_up(expr, x, **kwargs):
return np.transpose(x, axes=expr.axes)
@dispatch(TensorDot, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, **kwargs):
return np.tensordot(lhs, rhs, axes=[expr._left_axes, expr._right_axes])
@dispatch(IsIn, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.in1d(data, tuple(expr._keys))
@compute_up.register(Join, DataFrame, np.ndarray)
@compute_up.register(Join, np.ndarray, DataFrame)
@compute_up.register(Join, np.ndarray, np.ndarray)
def join_ndarray(expr, lhs, rhs, **kwargs):
if isinstance(lhs, np.ndarray):
lhs = DataFrame(lhs)
if isinstance(rhs, np.ndarray):
rhs = DataFrame(rhs)
return compute_up(expr, lhs, rhs, **kwargs)
@dispatch(Coerce, np.ndarray)
def compute_up(expr, data, **kwargs):
return data.astype(to_numpy_dtype(expr.schema))
@dispatch(Concat, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, _concat=np.concatenate, **kwargs):
return _concat((lhs, rhs), axis=expr.axis)
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/compute/numpy.py",
"copies": "2",
"size": "12793",
"license": "bsd-3-clause",
"hash": 2202862593433332200,
"line_mean": 28.075,
"line_max": 84,
"alpha_frac": 0.6330024232,
"autogenerated": false,
"ratio": 3.1847149614139907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9814089170942661,
"avg_score": 0.0007256427342660123,
"num_lines": 440
} |
from __future__ import absolute_import, division, print_function
import datetime
try:
from cytoolz import curry
except ImportError:
from toolz import curry
from datashape import dshape
import numpy as np
import pandas as pd
from pandas.io.packers import decode
# Imports that replace older utils.
from blaze.compatibility import PY2
# dict of converters. This is stored as a default arg to object hook for
# performance because this function is really really slow when unpacking data.
# This is a mutable default but it is not a bug!
_converters = {}
if PY2:
_keys = dict.keys
else:
def _keys(d, _dkeys=dict.keys, _list=list):
return _list(_dkeys(d))
def object_hook(ob,
# Cached for performance. Forget these exist.
_len=len,
_keys=_keys,
_first_three_chars=np.s_[:3],
_converters=_converters):
"""Convert a json object dict back into a python object.
This looks for our objects that have encoded richer representations
with a ``__!{type}`` key.
Parameters
----------
ob : dict
The raw json parsed dictionary.
Returns
-------
parsed : any
The richer form of the object.
Notes
-----
The types that this reads can be extended with the ``register`` method.
For example:
>>> class MyList(list):
... pass
>>> @object_hook.register('MyList')
... def _parse_my_list(ob):
... return MyList(ob)
Register can also be called as a function like:
>>> a = object_hook.register('frozenset', frozenset)
>>> a is frozenset
True
"""
if _len(ob) != 1:
return decode(ob)
key = _keys(ob)[0]
if key[_first_three_chars] != '__!':
return ob
return _converters[key](ob[key])
@curry
def register(typename, converter, converters=_converters):
converters['__!' + typename] = converter
return converter
object_hook.register = register
object_hook._converters = _converters  # make this accessible for debugging
del _converters
del _keys # captured by default args
object_hook.register('datetime', pd.Timestamp)
object_hook.register('frozenset', frozenset)
object_hook.register('datashape', dshape)
@object_hook.register('mono')
def _read_mono(m):
return dshape(m).measure
@object_hook.register('timedelta')
def _read_timedelta(ds):
return datetime.timedelta(seconds=ds)
@object_hook.register('bytes')
def _read_bytes(bs):
if not isinstance(bs, bytes):
bs = bs.encode('latin1')
return bs
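# Illustrative round trip (not part of the module): a single-key dict tagged
# with the '__!' prefix is routed through its registered converter, e.g.
# object_hook({'__!timedelta': 30}) returns datetime.timedelta(seconds=30),
# while multi-key dicts go through pandas' msgpack ``decode`` and untagged
# single-key dicts are returned unchanged.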
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/serialization/object_hook.py",
"copies": "3",
"size": "2571",
"license": "bsd-3-clause",
"hash": 7754648920608950000,
"line_mean": 22.8055555556,
"line_max": 78,
"alpha_frac": 0.6468300272,
"autogenerated": false,
"ratio": 3.8088888888888888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5955718916088889,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import enum
from .utils import xml_to_dict
from .models import BaseModel
class ProductCode(enum.Enum):
QuickSSLPremium = "QuickSSLPremium"
QuickSSL = "QuickSSL"
FreeSSL = "FreeSSL"
RapidSSL = "RapidSSL"
EnterpriseSSL = "ESSL"
TrueBusinessID = "TrueBizID"
TrueCredentials = "TrueCredentials"
GeoCenterAdmin = "GeoCenterAdmin"
TrueBusinessIDWithEV = "TrueBizIDEV"
SecureSite = "SecureSite"
SecureSitePro = "SecureSitePro"
SecureSiteWithEV = "SecureSiteEV"
SecureSiteProWithEV = "SecureSiteProEV"
SSL123 = "SSL123"
SSL123AdditionalLicense = "SSL123ASL"
SGCSuperCerts = "SGCSuperCerts"
SGCSuperCertAdditionalLicense = "SGCSuperCertsASL"
SSLWebServer = "SSLWebServer"
SSLWebServerWithEV = "SSLWebServerEV"
SSLWebServerAdditionalLicense = "SSLWebServerASL"
SSLWebServerWithEVAdditionalLicense = "SSLWebServerEVASL"
SymantecCodeSigningCertificate = "VeriSignCSC"
ThawteCodeSigningCertificate = "thawteCSC"
GeoTrustFreeTrial = "GeoTrustFreeTrial"
PartnerAuth = "PartnerAuth"
VerifiedSiteSealOrg = "VerifiedSiteSealOrg"
TrustSealInd = "TrustSealInd"
TrustSealOrg = "TrustSealOrg"
HourlyUsage = "HourlyUsage"
MalwareBasic = "Malwarebasic"
MalwareScan = "Malwarescan"
class ValidityPeriod(enum.IntEnum):
Month = 1
OneYear = 12
TwoYears = 24
ThreeYears = 36
FourYears = 48
class WebServer(enum.IntEnum):
ApacheSSL = 1
ApacheRaven = 2
ApacheSSLeay = 3
ApacheOpenSSL = 20
Apache2 = 21
ApacheApacheSSL = 22
IIS4 = 12
IIS5 = 13
IIS = 33
LotusDominoGo4625 = 9
LotusDominoGo4626 = 10
LotusDomino = 11
C2NetStronghold = 4
IBMHTTP = 7
iPlanet = 8
NetscapeEnterpriseFastrack = 14
ZeusV3 = 17
CobaltSeries = 23
Cpanel = 24
Ensim = 25
Hsphere = 26
Ipswitch = 27
Plesk = 28
JakartTomcat = 29
WebLogic = 30
OReillyWebSiteProfessional = 31
WebStar = 32
Other = 18
class Order(BaseModel):
_command = "QuickOrder"
def response_result(self, xml):
return {
"PartnerOrderID": xml.xpath(
"OrderResponseHeader/PartnerOrderID/text()"
)[0],
"GeoTrustOrderID": xml.xpath("GeoTrustOrderID/text()")[0],
}
class GetOrderByPartnerOrderID(BaseModel):
_command = "GetOrderByPartnerOrderID"
def response_result(self, xml):
return xml_to_dict(xml.xpath("OrderDetail")[0])
class GetOrdersByDateRange(BaseModel):
_command = "GetOrdersByDateRange"
def response_result(self, xml):
results = []
for order in xml.xpath("OrderDetails//OrderInfo"):
results.append(dict((i.tag, i.text) for i in order))
return results
class GetModifiedOrders(BaseModel):
_command = "GetModifiedOrders"
def response_result(self, xml):
results = []
for order in xml.xpath("OrderDetails/OrderDetail"):
            # Since we are grabbing both OrderInfo and ModificationEvents,
            # each result entry is a dict keyed by category, which is easier
            # than relying on the order the two appear in.
nodes = {}
categories = dict((i.tag, i) for i in order)
# Same as in the other "get" methods.
nodes["OrderInfo"] = dict(
(i.tag, i.text) for i in categories["OrderInfo"]
)
# A list of events; each entry contains a dict of values.
events = []
for event in categories["ModificationEvents"]:
events.append(dict((i.tag, i.text) for i in event))
nodes["ModificationEvents"] = events
results.append(nodes)
return results
class ChangeApproverEmail(BaseModel):
_command = "ChangeApproverEmail"
def response_result(self, xml):
return
class Reissue(BaseModel):
_command = "Reissue"
def response_result(self, xml):
return {
"PartnerOrderID": xml.xpath(
"OrderResponseHeader/PartnerOrderID/text()"
)[0],
"GeoTrustOrderID": xml.xpath("GeoTrustOrderID/text()")[0],
}
class Revoke(BaseModel):
_command = "Revoke"
def response_result(self, xml):
return {
"PartnerOrderID": xml.xpath(
"OrderResponseHeader/PartnerOrderID/text()"
)[0],
"GeoTrustOrderID": xml.xpath("GeoTrustOrderID/text()")[0],
"SerialNumber": xml.xpath("SerialNumber/text()")[0],
}
class ModifyOperation(enum.Enum):
Approve = "APPROVE"
ApproveESSL = "APPROVE_ESSL"
ResellerApprove = "RESELLER_APPROVE"
ResellerDisapprove = "RESELLER_DISAPPROVE"
Reject = "REJECT"
Cancel = "CANCEL"
Deactivate = "DEACTIVATE"
RequestOnDemandScan = "REQUEST_ON_DEMAND_SCAN"
RequestVulnerabilityScan = "REQUEST_VULNERABILITY_SCAN"
UpdateSealPreferences = "UPDATE_SEAL_PREFERENCES"
UpdatePostStatus = "UPDATE_POST_STATUS"
PushState = "PUSH_ORDER_STATE"
class ModifyOrder(BaseModel):
_command = "ModifyOrder"
def response_result(self, xml):
return
class ValidateOrderParameters(BaseModel):
_command = "ValidateOrderParameters"
def response_result(self, xml):
result = {}
for outer in xml.xpath("/ValidateOrderParameters/child::*"):
if outer.tag == "OrderResponseHeader":
continue
if outer.xpath('count(child::*)') > 0:
result[outer.tag] = dict((i.tag, i.text) for i in outer)
else:
result[outer.tag] = outer.text
return result
class GetQuickApproverList(BaseModel):
_command = "GetQuickApproverList"
def response_result(self, xml):
result = []
for approver in xml.xpath("ApproverList/Approver"):
result.append(dict(
(i.tag, i.text) for i in approver
))
return result
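# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how a model becomes
# the flat payload sent to the API. The field names passed to ``Order`` below
# are illustrative only (the real Symantec parameter names are not defined in
# this file); enum members are unwrapped to their values and the per-model
# ``_command`` plus response type are appended by ``BaseModel.serialize``.
if __name__ == '__main__':
    order = Order(
        productcode=ProductCode.SSL123,         # enum -> "SSL123"
        validityperiod=ValidityPeriod.OneYear,  # enum -> 12
        webservertype=WebServer.Apache2,        # enum -> 21
    )
    data = order.serialize()
    assert data['command'] == 'QuickOrder'
    assert data['responsetype'] == 'XML'
    assert data['productcode'] == 'SSL123'
    assert data['validityperiod'] == 12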
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "symantecssl/order.py",
"copies": "1",
"size": "6071",
"license": "apache-2.0",
"hash": 1291378351544392000,
"line_mean": 23.9835390947,
"line_max": 75,
"alpha_frac": 0.6300444737,
"autogenerated": false,
"ratio": 3.641871625674865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9771916099374864,
"avg_score": 0,
"num_lines": 243
} |
from __future__ import absolute_import, division, print_function
import enum
import lxml
from .datastructures import CaseInsensitiveDict
from .exceptions import SymantecError
class BaseModel(object):
_responsetype = "XML"
def __init__(self, **kwargs):
self.data = CaseInsensitiveDict(kwargs)
def __setattr__(self, name, value):
# Allow us to set our data attribute
if name == "data":
return super(BaseModel, self).__setattr__(name, value)
self.data[name] = value
def __getattr__(self, name):
if name not in self.data:
raise AttributeError(
"'{0}' object has no attribute '{1}'".format(
self.__class__.__name__,
name,
)
)
return self.data[name]
def __delattr__(self, name):
if name not in self.data:
raise AttributeError(
"'{0}' object has no attribute '{1}'".format(
self.__class__.__name__,
name,
)
)
del self.data[name]
def serialize(self):
data = {}
# Serialize the user provided data
for key, value in self.data.items():
# Turn our enums into "real" data
if isinstance(value, enum.Enum):
value = value.value
data[key] = value
# Add the command and response type
data.update({
"command": self._command,
"responsetype": self._responsetype,
})
return data
def response(self, data):
xml = lxml.etree.fromstring(data)
success_code = int(xml.xpath(
"*[ "
" substring( name(), string-length(name() ) - 13 ) "
" = 'ResponseHeader' "
"]"
"/SuccessCode/text()"
)[0])
if success_code == 0:
return self.response_result(xml)
else:
return self.response_error(xml)
def response_error(self, xml):
errors = []
for error in xml.xpath(
"*[ "
" substring( name(), string-length(name() ) - 13 ) "
" = 'ResponseHeader' "
"]"
"/Errors/Error"):
errors.append(dict((i.tag, i.text) for i in error))
# We only display the first error message here, but all of them
# will be available on the exception
raise SymantecError(
"The Symantec API call {0} returned an error: '{1}'".format(
self.__class__.__name__,
errors[0]["ErrorMessage"],
),
errors=errors,
)
def response_result(self, xml):
raise NotImplementedError
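# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal subclass
# and hand-written XML showing how ``response`` routes on the ``SuccessCode``
# found under the ``*ResponseHeader`` element. The ``Echo`` command and the
# XML below are invented for illustration only.
if __name__ == '__main__':
    class Echo(BaseModel):
        _command = "Echo"
        def response_result(self, xml):
            return xml.xpath("Message/text()")[0]
    ok_xml = (b"<EchoResponse>"
              b"<EchoResponseHeader><SuccessCode>0</SuccessCode>"
              b"</EchoResponseHeader>"
              b"<Message>hello</Message>"
              b"</EchoResponse>")
    assert Echo().response(ok_xml) == "hello"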
| {
"repo_name": "jmvrbanac/symantecssl",
"path": "symantecssl/models.py",
"copies": "1",
"size": "2811",
"license": "apache-2.0",
"hash": 9140226084404838000,
"line_mean": 26.5588235294,
"line_max": 75,
"alpha_frac": 0.4966204198,
"autogenerated": false,
"ratio": 4.512038523274478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 102
} |
from __future__ import absolute_import, division, print_function
import errno
import inspect
from collections import Container
from contextlib import contextmanager
from datetime import datetime, timedelta
from hashlib import md5
from pathlib import Path
import six
from boltons.formatutils import DeferredValue as DV
from represent import ReprHelperMixin
from .backends import Backend, PickleBackend
from .compat.contextlib import suppress
from .exceptions import (
BackendLoadError, KeyExpirationError, KeyFileNotFoundError, KeyInvalidError)
from .keymakers import DefaultKeyMaker
from .log import log_handled_exception, logger, logger_config
from .utilities import DecoratorFactory, PrunedFilesInfo, raise_invalid_keys
__all__ = ('Bucket', 'DeferredWriteBucket', 'deferred_write')
class Bucket(ReprHelperMixin, Container, object):
"""Dictionary-like object backed by a file cache.
Parameters:
backend: Backend class. Default:
:py:class:`~bucketcache.backends.PickleBackend`
path: Directory for storing cached objects. Will be created if missing.
config: `Config` instance for backend.
keymaker: `KeyMaker` instance for object -> key serialization.
lifetime: Key lifetime.
kwargs: Keyword arguments to pass to :py:class:`datetime.timedelta`
as shortcut for lifetime.
:type backend: :py:class:`~bucketcache.backends.Backend`
:type config: :py:class:`~bucketcache.config.Config`
:type keymaker: :py:class:`~bucketcache.keymakers.KeyMaker`
:type lifetime: :py:class:`~datetime.timedelta`
"""
def __init__(self, path, backend=None, config=None, keymaker=None,
lifetime=None, **kwargs):
if kwargs:
valid_kwargs = {'days', 'seconds', 'microseconds', 'milliseconds',
'minutes', 'hours', 'weeks'}
passed_kwargs = set(kwargs)
raise_invalid_keys(valid_kwargs, passed_kwargs,
'Invalid lifetime argument{s}: {keys}')
if lifetime:
raise ValueError("either 'lifetime' or lifetime arguments "
"('seconds', 'minutes', ...) can be passed, "
"but not both.")
lifetime = timedelta(**kwargs)
# Now we're thinking with portals.
self._cache = dict()
_path = Path(path)
with suppress(OSError):
_path.mkdir()
self._path = _path.resolve()
if backend is not None:
self.backend = backend
else:
self.backend = PickleBackend
self.config = config
if keymaker is None:
keymaker = DefaultKeyMaker()
self.keymaker = keymaker
self.lifetime = lifetime
@property
def path(self):
return self._path
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, value):
error = TypeError("'backend' must inherit from "
"bucketcache.Backend")
if inspect.isclass(value):
if not issubclass(value, Backend):
raise error
else:
raise error
value.check_concrete()
self._backend = value
@property
def lifetime(self):
return self._lifetime
@lifetime.setter
def lifetime(self, value):
if value is not None and value < timedelta(0):
raise ValueError('lifetime cannot be negative.')
if not value:
self._lifetime = None
else:
if not isinstance(value, timedelta):
raise TypeError('lifetime must be type datetime.timedelta '
'(or a subclass)')
self._lifetime = value
def __contains__(self, item):
try:
self[item]
except KeyError:
return False
else:
return True
def __setitem__(self, key, value):
key_hash = self._hash_for_key(key)
obj = self._update_or_make_obj_with_hash(key_hash, value)
self._set_obj_with_hash(key_hash, obj)
def setitem(self, key, value):
"""Provide setitem method as alternative to ``bucket[key] = value``"""
return self.__setitem__(key, value)
def _update_or_make_obj_with_hash(self, key_hash, value):
try:
obj = self._get_obj_from_hash(key_hash, load_file=False)
obj.value = value
except KeyInvalidError:
obj = self.backend(value, config=self.config)
obj.expiration_date = self._object_expiration_date()
return obj
def _set_obj_with_hash(self, key_hash, obj):
file_path = self._path_for_hash(key_hash)
with open(str(file_path), self._write_mode) as f:
obj.dump(f)
self._cache[key_hash] = obj
def __getitem__(self, key):
obj = self._get_obj(key)
return obj.value
def getitem(self, key):
"""Provide getitem method as alternative to ``bucket[key]``."""
return self.__getitem__(key)
def _get_obj(self, key):
key_hash = self._hash_for_key(key)
try:
obj = self._get_obj_from_hash(key_hash)
except KeyInvalidError:
raise KeyError(self._abbreviate(key))
else:
return obj
def _get_obj_from_hash(self, key_hash, load_file=True):
file_path = self._path_for_hash(key_hash)
if key_hash in self._cache:
obj = self._cache[key_hash]
elif load_file:
logger.info('Attempt load from file: {}', file_path)
try:
with file_path.open(self._read_mode) as f:
obj = self.backend.from_file(f, config=self.config)
except IOError as e:
if e.errno == errno.ENOENT:
msg = 'File not found: {}'.format(file_path)
log_handled_exception(msg)
raise KeyFileNotFoundError(msg)
else:
msg = 'Unexpected exception trying to load file: {}'
logger.exception(msg, file_path)
raise
except BackendLoadError:
msg = 'Backend {} failed to load file: {}'
msg = msg.format(self.backend, file_path)
log_handled_exception(msg)
raise KeyInvalidError(msg)
except Exception:
msg = 'Unhandled exception trying to load file: {}'
logger.exception(msg, file_path)
raise
self._cache[key_hash] = obj
else:
raise KeyInvalidError("<key hash not found in internal "
"cache '{}'>".format(key_hash))
if self.lifetime:
# If object expires after now + lifetime, then it was saved with a
# previous Bucket() with a longer lifetime. Let's expire the key.
if not obj.expiration_date:
lifetime_changed = True
else:
lifetime_changed = obj.expiration_date > self._object_expiration_date()
if lifetime_changed:
logger.warning('Object expires after now + current lifetime. '
'Object must have been saved with previous '
'cache settings. Expiring key.')
else:
lifetime_changed = False
if obj.has_expired() or lifetime_changed:
file_path.unlink()
del self._cache[key_hash]
raise KeyExpirationError("<key hash '{}'>".format(key_hash))
return obj
def __delitem__(self, key):
file_path, key_hash = self._path_and_hash_for_key(key)
if key in self:
file_path.unlink()
del self._cache[key_hash]
else:
raise KeyError(self._abbreviate(key))
def prune_directory(self):
"""Delete any objects that can be loaded and are expired according to
the current lifetime setting.
A file will be deleted if the following conditions are met:
        - The file extension matches :py:attr:`bucketcache.backends.Backend.file_extension`
- The object can be loaded by the configured backend.
- The object's expiration date has passed.
Returns:
File size and number of files deleted.
:rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo`
.. note::
For any buckets that share directories, ``prune_directory`` will
affect files saved with both, if they use the same backend class.
This is not destructive, because only files that have expired
according to the lifetime of the original bucket are deleted.
"""
glob = '*.{ext}'.format(ext=self.backend.file_extension)
totalsize = 0
totalnum = 0
for f in self._path.glob(glob):
filesize = f.stat().st_size
key_hash = f.stem
in_cache = key_hash in self._cache
try:
self._get_obj_from_hash(key_hash)
except KeyExpirationError:
# File has been deleted by `_get_obj_from_hash`
totalsize += filesize
totalnum += 1
except KeyInvalidError:
pass
except Exception:
raise
else:
if not in_cache:
del self._cache[key_hash]
return PrunedFilesInfo(size=totalsize, num=totalnum)
def unload_key(self, key):
"""Remove key from memory, leaving file in place."""
key_hash = self._hash_for_key(key)
if key in self:
del self._cache[key_hash]
def __call__(self, *args, **kwargs):
"""Use Bucket instance as a decorator.
.. code:: python
@bucket
def fun(...):
...
Use `method=True` for instance methods:
.. code:: python
@bucket(method=True)
def fun(self, ...):
...
        Use `nocache='argname'` for an argument that can skip the cache.
.. code:: python
@bucket(nocache='refresh')
def fun(refresh=False):
...
fun(refresh=True) # Cache not used.
Use `ignore=['argname1', 'argname2', ...]` to ignore arguments when
        making the cache key.
.. code:: python
@bucket(ignore=['log'])
def get(name, log):
...
get('spam')
get('spam', log=True) # Cache used even though arguments differ.
"""
f = None
default_kwargs = {'method': False, 'nocache': None, 'ignore': None}
error = ('To use an instance of {}() as a decorator, '
'use @bucket or @bucket(<args>) '
'(See documentation)'.format(self.__class__.__name__))
if len(args) + len(kwargs) < 1:
# We need f or decorator arguments
raise TypeError(error)
if len(args) == 1:
# Positional arg must be the to-be-wrapped function.
f = args[0]
# Allow method=True to be omitted when decorating properties, as
# this can be detected.
if not callable(f) and not isinstance(f, property):
raise ValueError(error)
elif len(args) > 1:
raise TypeError(error)
if len(kwargs):
if len(args):
raise TypeError(error)
passed_kwargs = set(kwargs)
valid_kwargs = set(default_kwargs)
missing_kwargs = valid_kwargs - passed_kwargs
raise_invalid_keys(valid_kwargs, passed_kwargs,
'Invalid decorator argument{s}: {keys}')
            kwargs.update({k: default_kwargs[k] for k in missing_kwargs})
        else:
            # Plain ``@bucket`` usage: fall back to the defaults so the
            # lookups below do not raise KeyError.
            kwargs = default_kwargs
method = kwargs['method']
nocache = kwargs['nocache']
ignore = kwargs['ignore']
if f:
# We've been passed f as a standard decorator. Instantiate cached
# function class and return the decorator.
cf = DecoratorFactory(bucket=self, method=method, nocache=nocache,
ignore=ignore)
return cf.decorate(f)
else:
# We've been called with decorator arguments, so we need to return
# a function that makes a decorator.
cf = DecoratorFactory(bucket=self, method=method, nocache=nocache,
ignore=ignore)
def make_decorator(f):
return cf.decorate(f)
return make_decorator
def _path_and_hash_for_key(self, key):
key_hash = self._hash_for_key(key)
path = self._path_for_hash(key_hash)
return path, key_hash
def _path_for_key(self, key):
key_hash = self._hash_for_key(key)
return self._path_for_hash(key_hash)
def _path_for_hash(self, key_hash):
filename = '{}.{}'.format(key_hash, self.backend.file_extension)
return self._path / filename
def _hash_for_key(self, key):
if logger_config.log_full_keys:
dkey = key
else:
dkey = DV(lambda: self._abbreviate(key))
logger.debug('_hash_for_key <{}>', dkey)
md5hash = md5(self.backend.__name__.encode('utf-8'))
for batch in self.keymaker.make_key(key):
if logger_config.log_full_keys:
logger.debug('_hash_for_key received bytes: {}', batch)
md5hash.update(batch)
digest = md5hash.hexdigest()
logger.debug('_hash_for_key finished with digest {}', digest)
return digest
@staticmethod
def _abbreviate(obj):
string = repr(obj)
if len(string) > 80:
return string[:77] + '...'
else:
return string
def _object_expiration_date(self):
if self.lifetime:
return datetime.utcnow() + self.lifetime
else:
return None
@property
def _read_mode(self):
return 'rb' if self.backend.binary_format else 'r'
@property
def _write_mode(self):
return 'wb' if self.backend.binary_format else 'w'
def _repr_helper_(self, r):
r.keyword_with_value('path', str(self.path))
r.keyword_from_attr('config')
r.keyword_with_value('backend', self.backend.__name__, raw=True)
if self.lifetime:
for attr in ('days', 'seconds', 'microseconds'):
value = getattr(self.lifetime, attr)
if value:
r.keyword_with_value(attr, value)
class DeferredWriteBucket(Bucket):
"""Alternative implementation of :py:class:`~bucketcache.buckets.Bucket`
that defers writing to file until
:py:meth:`~bucketcache.buckets.DeferredWriteBucket.sync` is called.
"""
@classmethod
def from_bucket(cls, bucket):
self = cls(path=bucket.path, backend=bucket.backend,
config=bucket.config, keymaker=bucket.keymaker,
lifetime=bucket.lifetime)
self._cache = bucket._cache
return self
def _set_obj_with_hash(self, key_hash, obj):
"""Reimplement Bucket._set_obj_with_hash to skip writing to file."""
self._cache[key_hash] = obj
def unload_key(self, key):
"""Remove key from memory, leaving file in place.
This forces :py:meth:`~bucketcache.buckets.DeferredWriteBucket.sync`.
"""
self.sync()
return super(DeferredWriteBucket, self).unload_key(key)
def sync(self):
"""Commit deferred writes to file."""
for key_hash, obj in six.iteritems(self._cache):
# Objects are checked for expiration in __getitem__,
# but we can check here to avoid unnecessary writes.
if not obj.has_expired():
file_path = self._path_for_hash(key_hash)
with open(str(file_path), self._write_mode) as f:
obj.dump(f)
@contextmanager
def deferred_write(bucket):
"""Context manager for deferring writes of a :py:class:`Bucket` within a
block.
Parameters:
bucket (:py:class:`Bucket`): Bucket to defer writes for within context.
Returns:
Bucket to use within context.
:rtype: :py:class:`DeferredWriteBucket`
When the context is closed, the stored objects are written to file. The
in-memory cache of objects is used to update that of the original bucket.
.. code-block:: python
bucket = Bucket(path)
with deferred_write(bucket) as deferred:
deferred[key] = value
...
"""
deferred_write_bucket = DeferredWriteBucket.from_bucket(bucket)
yield deferred_write_bucket
deferred_write_bucket.sync()
bucket._cache.update(deferred_write_bucket._cache)
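# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module), pulling together the
# dictionary-style access, the decorator form and ``deferred_write`` described
# in the docstrings above. The cache directory name is arbitrary.
if __name__ == '__main__':
    bucket = Bucket('example_cache', minutes=30)
    # Dictionary-style access; keys are serialized by the configured KeyMaker.
    bucket['answer'] = 42
    assert bucket['answer'] == 42
    assert 'answer' in bucket
    # Decorator form: results are cached per argument set.
    @bucket
    def slow_square(x):
        return x * x
    assert slow_square(3) == 9  # computed
    assert slow_square(3) == 9  # served from the cache
    # Defer file writes until the block exits.
    with deferred_write(bucket) as deferred:
        deferred['batch'] = list(range(10))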
| {
"repo_name": "RazerM/bucketcache",
"path": "bucketcache/buckets.py",
"copies": "1",
"size": "16989",
"license": "mit",
"hash": -332298416887734000,
"line_mean": 32.3772102161,
"line_max": 91,
"alpha_frac": 0.5675437048,
"autogenerated": false,
"ratio": 4.322900763358779,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006793493240348397,
"num_lines": 509
} |
from __future__ import absolute_import, division, print_function
import flask
import requests
from odo import resource
from datashape import dshape
from ..expr import Expr
from ..dispatch import dispatch
from .server import DEFAULT_PORT
from .serialization import json
# These are a hack for testing
# It's convenient to use requests for live production but use
# flask for testing. Sadly they have different Response objects,
# hence the dispatched functions
__all__ = 'Client',
def content(response):
if isinstance(response, flask.Response):
return response.data
if isinstance(response, requests.Response):
return response.content
def ok(response):
if isinstance(response, flask.Response):
return 'OK' in response.status
if isinstance(response, requests.Response):
return response.ok
def reason(response):
if isinstance(response, flask.Response):
return response.status
if isinstance(response, requests.Response):
return response.text
class Client(object):
""" Client for Blaze Server
Provides programmatic access to datasets living on Blaze Server
Parameters
----------
url : str
URL of a Blaze server
serial : SerializationFormat, optional
The serialization format object to use. Defaults to JSON.
A serialization format is an object that supports:
name, loads, and dumps.
Examples
--------
>>> # This example matches with the docstring of ``Server``
>>> from blaze import Data
>>> c = Client('localhost:6363')
>>> t = Data(c) # doctest: +SKIP
See Also
--------
blaze.server.server.Server
"""
__slots__ = 'url', 'serial'
def __init__(self, url, serial=json, **kwargs):
url = url.strip('/')
if not url[:4] == 'http':
url = 'http://' + url
self.url = url
self.serial = serial
@property
def dshape(self):
"""The datashape of the client"""
response = requests.get('%s/datashape' % self.url)
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
return dshape(content(response).decode('utf-8'))
@dispatch(Client)
def discover(c):
return c.dshape
@dispatch(Expr, Client)
def compute_down(expr, ec, **kwargs):
from .server import to_tree
tree = to_tree(expr)
serial = ec.serial
r = requests.get('%s/compute.%s' % (ec.url, serial.name),
data=serial.dumps({'expr': tree}))
if not ok(r):
raise ValueError("Bad response: %s" % reason(r))
return serial.loads(content(r))['data']
@resource.register('blaze://.+')
def resource_blaze(uri, leaf=None, **kwargs):
if leaf is not None:
raise ValueError('The syntax blaze://...::{leaf} is no longer '
'supported as of version 0.8.1.\n'
'You can access {leaf!r} using this syntax:\n'
'Data({uri})[{leaf!r}]'
.format(leaf=leaf, uri=uri))
uri = uri[len('blaze://'):]
sp = uri.split('/')
tld, rest = sp[0], sp[1:]
if ':' not in tld:
tld += ':%d' % DEFAULT_PORT
uri = '/'.join([tld] + list(rest))
return Client(uri)
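# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the two equivalent
# ways of pointing at a running Blaze server. The host and port below are
# placeholders; a server must actually be listening before ``.dshape`` or a
# ``compute`` call will succeed.
if __name__ == '__main__':
    c = Client('localhost:6363')            # scheme is added automatically
    assert c.url == 'http://localhost:6363'
    r = resource('blaze://localhost:6363')  # same thing via a URI string
    assert isinstance(r, Client)
    assert r.url == c.url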
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/server/client.py",
"copies": "1",
"size": "3276",
"license": "bsd-3-clause",
"hash": -3073289098982557700,
"line_mean": 25,
"line_max": 71,
"alpha_frac": 0.6068376068,
"autogenerated": false,
"ratio": 3.913978494623656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 126
} |
from __future__ import absolute_import, division, print_function
import fnmatch
from toolz import compose, identity
from datashape.predicates import isscalar
from ..expr import (
Apply,
By,
Distinct,
ElemWise,
Expr,
Head,
Join,
Label,
Like,
Merge,
ReLabel,
Reduction,
SimpleSelection,
Sort,
Summary,
by,
)
from .python import (
binops,
compute,
pair_assemble,
reduce_by_funcs,
rowfunc,
rrowfunc,
)
from ..expr.broadcast import broadcast_collect
from ..expr.optimize import simple_selections
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from .core import compute_up
from .varargs import VarArgs
import py4j
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
__all__ = ['RDD', 'pyspark', 'SparkContext']
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(Expr, RDD)
def optimize(expr, seq):
return simple_selections(broadcast_collect(expr))
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
func = rowfunc(t)
return rdd.map(func)
@dispatch(Merge, VarArgs[RDD])
def compute_up(expr, args, **kwargs):
# TODO: How is this done in spark?
raise NotImplementedError()
@dispatch(SimpleSelection, RDD)
def compute_up(t, rdd, **kwargs):
predicate = optimize(t.predicate, rdd)
predicate = rrowfunc(predicate, t._child)
return rdd.filter(predicate)
rdd_reductions = {
reductions.sum: RDD.sum,
reductions.min: RDD.min,
reductions.max: RDD.max,
reductions.count: RDD.count,
reductions.mean: RDD.mean,
reductions.var: RDD.variance,
reductions.std: RDD.stdev,
reductions.nunique: compose(RDD.count, RDD.distinct)
}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
return rdd_reductions[type(t)](rdd)
def istruthy(x):
return not not x
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.take(t.n)
@dispatch(Apply, RDD)
def compute_up(t, rdd, **kwargs):
if t._splittable:
return rdd.mapPartitions(t.func)
else:
raise NotImplementedError("Can only apply splittable functions."
"To apply function to each partition add "
"splittable=True kwarg to call to apply. "
"t.apply(func, dshape, splittable=True)")
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
if isinstance(t.key, (str, unicode, tuple, list)):
key = rowfunc(t._child[t.key])
else:
key = optimize(t.key, rdd)
key = rrowfunc(key, t._child)
return (rdd.keyBy(key)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
if t.on:
raise NotImplementedError(
'spark backend cannot specify what columns to distinct on'
)
return rdd.distinct()
def jgetattr(data, attr, default=None):
"""Spark's API doesn't properly implement the ``getattr`` interface, so
we work around it.
"""
try:
return getattr(data, attr, default)
except py4j.protocol.Py4JJavaError:
return default
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
on_right = rowfunc(t.rhs[t.on_right])
lhs = lhs.keyBy(on_left)
rhs = rhs.keyBy(on_right)
how = t.how
if how == 'inner':
joiner = lhs.join
elif how == 'left':
joiner = lhs.leftOuterJoin
elif how == 'right':
joiner = lhs.rightOuterJoin
elif how == 'outer':
joiner = lhs.fullOuterJoin
else:
raise ValueError("Invalid join type %r, must be one of "
"{'inner', 'left', 'right', 'outer'}" % how)
rdd = joiner(rhs)
assemble = pair_assemble(t)
return rdd.map(lambda x: assemble(x[1]))
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
grouper = optimize(t.grouper, rdd)
apply = optimize(t.apply, rdd)
t = by(grouper, apply)
if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
(isinstance(t.apply, Summary) and
builtins.all(type(val) in binops for val in t.apply.values))):
grouper, binop, combiner, initial = reduce_by_funcs(t)
if isscalar(t.grouper.dshape.measure):
keyfunc = lambda x: (x,)
else:
keyfunc = identity
if isscalar(t.apply.dshape.measure):
valfunc = lambda x: (x,)
else:
valfunc = identity
unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
create = lambda v: binop(initial, v)
return (rdd.keyBy(grouper)
.combineByKey(create, binop, combiner)
.map(unpack))
else:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
rdd = rdd.cache()
return tuple(
compute(value, {t._child: rdd}, return_type='native')
for value in t.values
)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
def func(value, pattern=t.pattern):
return fnmatch.fnmatch(value, pattern)
return rdd.map(func)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/spark.py",
"copies": "3",
"size": "5897",
"license": "bsd-3-clause",
"hash": 7260012619172740000,
"line_mean": 23.7773109244,
"line_max": 78,
"alpha_frac": 0.6252331694,
"autogenerated": false,
"ratio": 3.3988472622478385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009157266510207687,
"num_lines": 238
} |
from __future__ import absolute_import, division, print_function
import fnmatch
from toolz import compose, identity
from datashape.predicates import isscalar
from ..expr import (
Expr, ElemWise, SimpleSelection, Sort, Apply, Distinct, Join, By, Label,
Summary, by, ReLabel, Like, Reduction, Head
)
from .python import (
compute, rrowfunc, rowfunc, pair_assemble, reduce_by_funcs, binops
)
from ..expr.broadcast import broadcast_collect
from ..expr.optimize import simple_selections
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from .core import compute_up
import py4j
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
__all__ = ['RDD', 'pyspark', 'SparkContext']
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(Expr, RDD)
def optimize(expr, seq):
return simple_selections(broadcast_collect(expr))
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
func = rowfunc(t)
return rdd.map(func)
@dispatch(SimpleSelection, RDD)
def compute_up(t, rdd, **kwargs):
predicate = optimize(t.predicate, rdd)
predicate = rrowfunc(predicate, t._child)
return rdd.filter(predicate)
rdd_reductions = {
reductions.sum: RDD.sum,
reductions.min: RDD.min,
reductions.max: RDD.max,
reductions.count: RDD.count,
reductions.mean: RDD.mean,
reductions.var: RDD.variance,
reductions.std: RDD.stdev,
reductions.nunique: compose(RDD.count, RDD.distinct)
}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
return rdd_reductions[type(t)](rdd)
def istruthy(x):
return not not x
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.take(t.n)
@dispatch(Apply, RDD)
def compute_up(t, rdd, **kwargs):
if t._splittable:
return rdd.mapPartitions(t.func)
else:
raise NotImplementedError("Can only apply splittable functions."
"To apply function to each partition add "
"splittable=True kwarg to call to apply. "
"t.apply(func, dshape, splittable=True)")
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
if isinstance(t.key, (str, unicode, tuple, list)):
key = rowfunc(t._child[t.key])
else:
key = optimize(t.key, rdd)
key = rrowfunc(key, t._child)
return (rdd.keyBy(key)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
if t.on:
raise NotImplementedError(
'spark backend cannot specify what columns to distinct on'
)
return rdd.distinct()
def jgetattr(data, attr, default=None):
"""Spark's API doesn't properly implement the ``getattr`` interface, so
we work around it.
"""
try:
return getattr(data, attr, default)
except py4j.protocol.Py4JJavaError:
return default
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
on_right = rowfunc(t.rhs[t.on_right])
lhs = lhs.keyBy(on_left)
rhs = rhs.keyBy(on_right)
how = t.how
if how == 'inner':
joiner = lhs.join
elif how == 'left':
joiner = lhs.leftOuterJoin
elif how == 'right':
joiner = lhs.rightOuterJoin
elif how == 'outer':
joiner = lhs.fullOuterJoin
else:
raise ValueError("Invalid join type %r, must be one of "
"{'inner', 'left', 'right', 'outer'}" % how)
rdd = joiner(rhs)
assemble = pair_assemble(t)
return rdd.map(lambda x: assemble(x[1]))
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
grouper = optimize(t.grouper, rdd)
apply = optimize(t.apply, rdd)
t = by(grouper, apply)
if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
(isinstance(t.apply, Summary) and
builtins.all(type(val) in binops for val in t.apply.values))):
grouper, binop, combiner, initial = reduce_by_funcs(t)
if isscalar(t.grouper.dshape.measure):
keyfunc = lambda x: (x,)
else:
keyfunc = identity
if isscalar(t.apply.dshape.measure):
valfunc = lambda x: (x,)
else:
valfunc = identity
unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
create = lambda v: binop(initial, v)
return (rdd.keyBy(grouper)
.combineByKey(create, binop, combiner)
.map(unpack))
else:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
rdd = rdd.cache()
return tuple(compute(value, {t._child: rdd}) for value in t.values)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
def func(value, pattern=t.pattern):
return fnmatch.fnmatch(value, pattern)
return rdd.map(func)
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/compute/spark.py",
"copies": "2",
"size": "5598",
"license": "bsd-3-clause",
"hash": -7543090797152899000,
"line_mean": 25.6571428571,
"line_max": 78,
"alpha_frac": 0.6332618792,
"autogenerated": false,
"ratio": 3.3783946891973446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5011656568397345,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
from six import string_types, with_metaclass
class MethodsInjector(type):
def __new__(cls, *args, **kwargs):
generated_cls = type.__new__(cls, *args, **kwargs)
def _init(self, value):
value_type = getattr(generated_cls, 'value_type', None)
if value_type is None or isinstance(value, value_type):
self.value = value
else:
raise RuntimeError("MethodsInjector: wrong init type.")
def _repr(self):
return repr(self.value)
def _eq(self, other):
return type(self) == type(other) and self.value == other.value
def _gt(self, other):
return self.value > other.value
def _hash(self):
return hash(self.value)
def inject_method(generated_cls, text, func):
if text not in generated_cls.__dict__:
setattr(generated_cls, text, func)
inject_method(generated_cls, '__init__', _init)
inject_method(generated_cls, '__repr__', _repr)
inject_method(generated_cls, '__eq__', _eq)
inject_method(generated_cls, '__gt__', _gt)
inject_method(generated_cls, '__hash__', _hash)
return functools.total_ordering(generated_cls)
class ID(with_metaclass(MethodsInjector)):
value_type = string_types
class LongString(with_metaclass(MethodsInjector)):
value_type = string_types
class ShortString(with_metaclass(MethodsInjector)):
value_type = string_types
class Integer(with_metaclass(MethodsInjector)):
value_type = int
class Float(with_metaclass(MethodsInjector)):
value_type = float
class Boolean(with_metaclass(MethodsInjector)):
value_type = bool
class List(with_metaclass(MethodsInjector)):
value_type = list
class Dict(with_metaclass(MethodsInjector)):
value_type = dict
class TestCase(object):
def __init__(self, id, input_value, output_value):
self.id = id
self.input_value = input_value
self.output_value = output_value
def __repr__(self):
        # Use text (not bytes) literals: bytes has no .format on Python 3
        # and __repr__ must return a str.
        template = ('<ID: {0},'
                    ' Input: {1},'
                    ' Output: {2}>')
return template.format(
repr(self.id), repr(self.input_value), repr(self.output_value),
)
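# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): what the metaclass
# injects. Each generated class gets value-type checking in ``__init__`` plus
# equality, ordering (completed by functools.total_ordering) and hashing
# based on the wrapped value.
if __name__ == '__main__':
    assert ID('a') == ID('a')
    assert ID('a') < ID('b')              # __lt__ derived from __eq__/__gt__
    assert hash(Integer(3)) == hash(3)
    try:
        Integer('not an int')
    except RuntimeError:
        pass
    else:
        raise AssertionError('value_type check should reject a str')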
| {
"repo_name": "huntzhan/tcg",
"path": "tcg/ast/elements.py",
"copies": "1",
"size": "2369",
"license": "mit",
"hash": -5444955809567215000,
"line_mean": 25.9204545455,
"line_max": 75,
"alpha_frac": 0.6057408189,
"autogenerated": false,
"ratio": 3.8836065573770493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49893473762770496,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import hmac
import io
import logging
import sys
import os
import re
import stripe
from stripe import six
from stripe.six.moves.urllib.parse import parse_qsl
STRIPE_LOG = os.environ.get("STRIPE_LOG")
logger = logging.getLogger("stripe")
__all__ = [
"io",
"parse_qsl",
"utf8",
"log_info",
"log_debug",
"dashboard_link",
"logfmt",
]
def utf8(value):
if six.PY2 and isinstance(value, six.text_type):
return value.encode("utf-8")
else:
return value
def is_appengine_dev():
return "APPENGINE_RUNTIME" in os.environ and "Dev" in os.environ.get(
"SERVER_SOFTWARE", ""
)
def _console_log_level():
if stripe.log in ["debug", "info"]:
return stripe.log
elif STRIPE_LOG in ["debug", "info"]:
return STRIPE_LOG
else:
return None
def log_debug(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() == "debug":
print(msg, file=sys.stderr)
logger.debug(msg)
def log_info(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() in ["debug", "info"]:
print(msg, file=sys.stderr)
logger.info(msg)
def _test_or_live_environment():
if stripe.api_key is None:
return
match = re.match(r"sk_(live|test)_", stripe.api_key)
if match is None:
return
return match.groups()[0]
def dashboard_link(request_id):
return "https://dashboard.stripe.com/{env}/logs/{reqid}".format(
env=_test_or_live_environment() or "test", reqid=request_id
)
def logfmt(props):
def fmt(key, val):
        # Handle the case where val is a bytes or bytearray
if six.PY3 and hasattr(val, "decode"):
val = val.decode("utf-8")
# Check if val is already a string to avoid re-encoding into
# ascii. Since the code is sent through 2to3, we can't just
# use unicode(val, encoding='utf8') since it will be
# translated incorrectly.
if not isinstance(val, six.string_types):
val = six.text_type(val)
if re.search(r"\s", val):
val = repr(val)
# key should already be a string
if re.search(r"\s", key):
key = repr(key)
return u"{key}={val}".format(key=key, val=val)
return u" ".join([fmt(key, val) for key, val in sorted(props.items())])
# Borrowed from Django's source code
if hasattr(hmac, "compare_digest"):
# Prefer the stdlib implementation, when available.
def secure_compare(val1, val2):
return hmac.compare_digest(utf8(val1), utf8(val2))
else:
def secure_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time
only when the two strings have the same length. It short-circuits when
they have different lengths.
"""
val1, val2 = utf8(val1), utf8(val2)
if len(val1) != len(val2):
return False
result = 0
if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for x, y in zip(val1, val2):
result |= x ^ y
else:
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
def get_object_classes():
# This is here to avoid a circular dependency
from stripe.object_classes import OBJECT_CLASSES
return OBJECT_CLASSES
def convert_to_stripe_object(
resp, api_key=None, stripe_version=None, stripe_account=None
):
# If we get a StripeResponse, we'll want to return a
# StripeObject with the last_response field filled out with
# the raw API response information
stripe_response = None
if isinstance(resp, stripe.stripe_response.StripeResponse):
stripe_response = resp
resp = stripe_response.data
if isinstance(resp, list):
return [
convert_to_stripe_object(
i, api_key, stripe_version, stripe_account
)
for i in resp
]
elif isinstance(resp, dict) and not isinstance(
resp, stripe.stripe_object.StripeObject
):
resp = resp.copy()
klass_name = resp.get("object")
if isinstance(klass_name, six.string_types):
klass = get_object_classes().get(
klass_name, stripe.stripe_object.StripeObject
)
else:
klass = stripe.stripe_object.StripeObject
return klass.construct_from(
resp,
api_key,
stripe_version=stripe_version,
stripe_account=stripe_account,
last_response=stripe_response,
)
else:
return resp
def convert_to_dict(obj):
"""Converts a StripeObject back to a regular dict.
Nested StripeObjects are also converted back to regular dicts.
:param obj: The StripeObject to convert.
:returns: The StripeObject as a dict.
"""
if isinstance(obj, list):
return [convert_to_dict(i) for i in obj]
# This works by virtue of the fact that StripeObjects _are_ dicts. The dict
# comprehension returns a regular dict and recursively applies the
# conversion to each value.
elif isinstance(obj, dict):
return {k: convert_to_dict(v) for k, v in six.iteritems(obj)}
else:
return obj
def populate_headers(idempotency_key):
if idempotency_key is not None:
return {"Idempotency-Key": idempotency_key}
return None
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
class class_method_variant(object):
def __init__(self, class_method_name):
self.class_method_name = class_method_name
def __call__(self, method):
self.method = method
return self
def __get__(self, obj, objtype=None):
@functools.wraps(self.method)
def _wrapper(*args, **kwargs):
if obj is not None:
# Method was called as an instance method, e.g.
# instance.method(...)
return self.method(obj, *args, **kwargs)
elif len(args) > 0 and isinstance(args[0], objtype):
# Method was called as a class method with the instance as the
# first argument, e.g. Class.method(instance, ...) which in
# Python is the same thing as calling an instance method
return self.method(args[0], *args[1:], **kwargs)
else:
# Method was called as a class method, e.g. Class.method(...)
class_method = getattr(objtype, self.class_method_name)
return class_method(*args, **kwargs)
return _wrapper
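# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a few of the helpers
# above in isolation. The key/value pairs are made up.
if __name__ == '__main__':
    # logfmt renders a dict as sorted, space-separated key=value pairs and
    # quotes any value containing whitespace.
    assert logfmt({'message': 'hello world', 'count': 3}) == \
        "count=3 message='hello world'"
    # secure_compare is a timing-safe equality check for equal-length strings.
    assert secure_compare('sig_abc', 'sig_abc')
    assert not secure_compare('sig_abc', 'sig_abd')
    # merge_dicts returns a new dict; the right-hand side wins on conflicts.
    assert merge_dicts({'a': 1}, {'a': 2, 'b': 3}) == {'a': 2, 'b': 3}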
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/util.py",
"copies": "1",
"size": "6925",
"license": "mit",
"hash": -5246313024708685000,
"line_mean": 27.9748953975,
"line_max": 79,
"alpha_frac": 0.6053429603,
"autogenerated": false,
"ratio": 3.733153638814016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4838496599114016,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import inspect
import operator
import sys
import types
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
import builtins
from queue import Queue, Empty
from itertools import zip_longest
from io import StringIO, BytesIO
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import quote, unquote
unicode = str
long = int
def apply(func, args, kwargs=None):
if kwargs:
return func(*args, **kwargs)
else:
return func(*args)
range = range
operator_div = operator.truediv
def _getargspec(func):
return inspect.getfullargspec(func)
else:
import __builtin__ as builtins
from Queue import Queue, Empty
from itertools import izip_longest as zip_longest
from StringIO import StringIO
from io import BytesIO
from urllib2 import urlopen
from urlparse import urlparse
from urllib import quote, unquote
unicode = unicode
long = long
apply = apply
range = xrange
operator_div = operator.div
def _getargspec(func):
return inspect.getargspec(func)
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, functools.partial):
return _getargspec(func.func)
else:
if isinstance(func, type):
return _getargspec(func.__init__)
else:
return _getargspec(func)
def skip(func):
return
def bind_method(cls, name, func):
"""Bind a method to class
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
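# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the two helpers that
# smooth over Python 2/3 differences for the rest of dask.
if __name__ == '__main__':
    # getargspec understands functools.partial objects as well as plain
    # functions and classes.
    def f(a, b, c=1):
        return a + b + c
    assert getargspec(functools.partial(f, 1)).args == ['a', 'b', 'c']
    # bind_method attaches a function as a method, hiding the Python 2
    # unbound-method quirk.
    class Point(object):
        def __init__(self, x):
            self.x = x
    bind_method(Point, 'double', lambda self: self.x * 2)
    assert Point(3).double() == 6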
| {
"repo_name": "pombredanne/dask",
"path": "dask/compatibility.py",
"copies": "2",
"size": "2080",
"license": "bsd-3-clause",
"hash": 6442610088047308000,
"line_mean": 22.908045977,
"line_max": 80,
"alpha_frac": 0.6466346154,
"autogenerated": false,
"ratio": 4.416135881104034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6062770496504034,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import inspect
import sys
import warnings
from collections import OrderedDict
import attr
import py
from py._code.code import FormattedExcinfo
import _pytest
from _pytest import nodes
from _pytest._code.code import TerminalRepr
from _pytest.compat import (
NOTSET, exc_clear, _format_args,
getfslineno, get_real_func,
is_generator, isclass, getimfunc,
getlocation, getfuncargnames,
safe_getattr,
FuncargnamesCompatAttr,
)
from _pytest.outcomes import fail, TEST_OUTCOME
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
'class': _pytest.python.Class,
'module': _pytest.python.Module,
'function': _pytest.main.Item,
'session': _pytest.main.Session,
})
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    # this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
        # to make sure we only ever create a matching fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() yields argnames in arbitrary order; sort them
        # so that repeated calls to get_parametrized_fixture_keys are
        # deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into items_before, items_same and items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
if newargkeys: # found a slicing key
slicing_argkey, _ = newargkeys.popitem()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_values = {} # argname -> fixture value
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
# backward incompatible note: now a readonly property
return list(self._pyfuncitem._fixtureinfo.names_closure)
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
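    # Illustrative sketch (not part of the original module): deciding only at
    # setup time which fixture to pull in via ``getfixturevalue``. The option
    # name and the fixture names used here are hypothetical.
    #
    #   @pytest.fixture
    #   def backend(request):
    #       name = request.config.getoption("--backend", default="sqlite")
    #       return request.getfixturevalue(name + "_backend")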
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(
deprecated.GETFUNCARGVALUE,
DeprecationWarning,
stacklevel=2)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfixturevalue(fixturedef)
self._fixture_values[argname] = result
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
values = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
values.reverse()
return values
values.append(fixturedef)
current = current._parent_request
def _getfixturevalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
if fixturedef.params is not None:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for the "
"current test.\n\nRequested fixture '{0}' defined in:\n{1}"
"\n\nRequested here:\n{2}:{3}".format(
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(functools.partial(fixturedef.finish, request=subrequest),
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" % (
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(scope, self._pyfuncitem)
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self._pyfuncitem = request._pyfuncitem
self._fixture_values = request._fixture_values
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
def addfinalizer(self, finalizer):
self._fixturedef.addfinalizer(finalizer)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
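# Illustrative note (not part of the original module): with the scope ordering
# above, ``scopemismatch`` flags a broader-scoped request reaching for a
# narrower-scoped fixture, e.g.
#
#   scopemismatch("session", "function")   # True, reported as ScopeMismatch
#   scopemismatch("function", "session")   # False, allowed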
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
raise ValueError(
"{0} {1}has an unsupported scope value '{2}'".format(
descr, 'from {0} '.format(where) if where else '',
scope)
)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file %s, line %s" % (fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist and name not in available:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" % (", ".join(sorted(available)),)
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
lines[0].strip()), red=True)
for line in lines[1:]:
tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
line.strip()), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
def teardown():
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
res = fixturefunc(**kwargs)
return res
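# Illustrative sketch (not part of the original module): the kind of generator
# fixture ``call_fixture_func`` handles. Everything after the single ``yield``
# runs as teardown via the registered finalizer; ``connect`` is hypothetical.
#
#   @pytest.fixture
#   def db():
#       conn = connect()
#       yield conn
#       conn.close()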
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr='fixture {0}'.format(func.__name__),
where=baseid
)
self.params = params
self.argnames = getfuncargnames(func, is_method=unittest)
self.unittest = unittest
self.ids = ids
self._finalizers = []
def addfinalizer(self, finalizer):
self._finalizers.append(finalizer)
def finish(self, request):
exceptions = []
try:
while self._finalizers:
try:
func = self._finalizers.pop()
func()
except: # noqa
exceptions.append(sys.exc_info())
if exceptions:
e = exceptions[0]
del exceptions # ensure we don't keep all frames alive because of the traceback
py.builtin._reraise(*e)
finally:
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
# even if finalization fails, we invalidate
# the cached fixture value and remove
# all finalizers because they may be bound methods which will
# keep instances alive
if hasattr(self, "cached_result"):
del self.cached_result
self._finalizers = []
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(functools.partial(self.finish, request=request))
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish(request)
assert not hasattr(self, "cached_result")
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
return hook.pytest_fixture_setup(fixturedef=self, request=request)
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
def _ensure_immutable_ids(ids):
if ids is None:
return
if callable(ids):
return ids
return tuple(ids)
@attr.s(frozen=True)
class FixtureFunctionMarker(object):
scope = attr.ib()
params = attr.ib(convert=attr.converters.optional(tuple))
autouse = attr.ib(default=False)
ids = attr.ib(default=None, convert=_ensure_immutable_ids)
name = attr.ib(default=None)
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define a
fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module" or "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
Fixtures can optionally provide their values to test functions using a ``yield`` statement,
instead of ``return``. In this case, the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome. A fixture function must yield exactly once.
"""
if callable(scope) and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
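# Illustrative usage (not part of the original module): both decoration styles
# accepted by ``fixture()``; the fixture bodies are hypothetical.
#
#   @pytest.fixture                        # direct decoration, function scope
#   def tmp_number():
#       return 42
#
#   @pytest.fixture(scope="module", params=[1, 2], ids=["one", "two"])
#   def param_value(request):              # parametrized, shared per module
#       return request.param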
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
if callable(scope) and params is None and not autouse:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, ids=ids, name=name)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
argnames = getfuncargnames(func, cls=cls)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != nodes.SEP:
nodeid = nodeid.replace(p.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
parametrize_func = getattr(metafunc.function, 'parametrize', None)
func_params = getattr(parametrize_func, 'args', [[None]])
func_kwargs = getattr(parametrize_func, 'kwargs', {})
# skip directly parametrized arguments
if "argnames" in func_kwargs:
argnames = parametrize_func.kwargs["argnames"]
else:
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
from _pytest import deprecated
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
'and be decorated with @pytest.fixture:\n%s' % name
assert not name.startswith(self._argprefix), msg
fixture_def = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodes.ischildnode(fixturedef.baseid, nodeid):
yield fixturedef
| {
"repo_name": "avadacatavra/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/fixtures.py",
"copies": "15",
"size": "45996",
"license": "mpl-2.0",
"hash": -2146177017441203700,
"line_mean": 39.1011333915,
"line_max": 111,
"alpha_frac": 0.6082702844,
"autogenerated": false,
"ratio": 4.386838340486409,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import itertools
from collections import defaultdict
from datetime import timedelta
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import (
arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils,)
from .indexing import (
BasicIndexer, OuterIndexer, PandasIndexAdapter, VectorizedIndexer,
as_indexable)
from .pycompat import (
OrderedDict, basestring, dask_array_type, integer_types, zip)
from .utils import OrderedSet
try:
import dask.array as da
except ImportError:
pass
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
indexing.ExplicitlyIndexed, pd.Index) + dask_array_type
BASIC_INDEXING_TYPES = integer_types + (slice,)
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name.
"""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None):
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
# TODO: consider extending this method to automatically handle Iris and
# pandas objects.
if hasattr(obj, 'variable'):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif hasattr(obj, 'dims') and (hasattr(obj, 'data') or
hasattr(obj, 'values')):
obj_data = getattr(obj, 'data', None)
if obj_data is None:
obj_data = getattr(obj, 'values')
obj = Variable(obj.dims, obj_data,
getattr(obj, 'attrs', None),
getattr(obj, 'encoding', None))
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except TypeError:
# use .format() instead of % because it handles tuples consistently
raise TypeError('tuples to convert into variables must be of the '
'form (dims, data[, attrs, encoding]): '
'{}'.format(obj))
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
'cannot set variable %r with %r-dimensional data '
'without explicit dimension names. Pass a tuple of '
'(dims, data) instead.' % (name, data.ndim))
obj = Variable(name, obj, fastpath=True)
else:
raise TypeError('unable to convert object into a variable without an '
'explicit list of dimensions: %r' % obj)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
'%r has more than 1-dimension and the same name as one of its '
'dimensions %r. xarray disallows such variables because they '
'conflict with the coordinates used to label '
'dimensions.' % (name, obj.dims))
obj = obj.to_index_variable()
return obj
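# Illustrative sketch (not part of the original module): the accepted input
# forms, assuming xarray is importable as ``xr``.
#
#   as_variable(xr.Variable(('x',), [1, 2, 3]))   # shallow copy of a Variable
#   as_variable((('x',), [1, 2, 3]))              # (dims, data[, attrs]) tuple
#   as_variable([1, 2, 3], name='x')              # 1D data -> IndexVariable 'x'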
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, 'ndim', 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, 'ns')
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, 'value', data), 'ns')
# we don't want nested self-described arrays
data = getattr(data, 'values', data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
# validate whether the data is valid data types
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == 'O':
data = _possibly_convert_objects(data)
elif data.dtype.kind == 'M':
data = np.asarray(data, 'datetime64[ns]')
elif data.dtype.kind == 'm':
data = np.asarray(data, 'timedelta64[ns]')
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == 'M':
data = np.datetime64(data, 'ns')
elif data.dtype.kind == 'm':
data = np.timedelta64(data, 'ns')
return data
class Variable(common.AbstractArray, arithmetic.SupportsArithmetic,
utils.NdimSizeLenMixin):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
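    # Illustrative construction (not part of the original module):
    #
    #   v = Variable(('time', 'space'), np.zeros((4, 3)),
    #                attrs={'units': 'm'},
    #                encoding={'dtype': 'float32'})
    #   v.dims, v.shape    # -> ('time', 'space'), (4, 3)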
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return (isinstance(self._data, (np.ndarray, np.number,
PandasIndexAdapter)) or
(isinstance(self._data, indexing.MemoryCachedArray) and
isinstance(self._data.array, indexing.NumpyIndexingAdapter)))
@property
def data(self):
if isinstance(self._data, dask_array_type):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
"replacement data must match the Variable's shape")
self._data = data
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if isinstance(self._data, dask_array_type):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not isinstance(self._data, np.ndarray):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_graph__(self):
if isinstance(self._data, dask_array_type):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return self._dask_finalize, (array_func, array_args, self._dims,
self._attrs, self._encoding)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return self._dask_finalize, (array_func, array_args, self._dims,
self._attrs, self._encoding)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
if isinstance(results, dict): # persist case
name = array_args[0]
results = {k: v for k, v in results.items() if k[0] == name}
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
to_variable = utils.alias(to_base_variable, 'to_variable')
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
to_coord = utils.alias(to_index_variable, 'to_coord')
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated.
"""
return self._dims
def _parse_dimensions(self, dims):
if isinstance(dims, basestring):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError('dimensions %s must have the same length as the '
'number of data dimensions, ndim=%s'
% (dims, self.ndim))
return dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
-----------
key: int, slice, array, dict or tuple of integer, slices and arrays
Any valid input for indexing.
Returns
-------
dims: tuple
Dimension of the resultant variable.
indexers: IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k
for k in key)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k
for k in key)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect it can be mapped as an outer indexer
# If all key is unlabeled, or
# key can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If all key is 1-dimensional and there are no duplicate labels,
# key can be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(dim for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types))
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k))
if k.dtype.kind == 'b':
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {0:d} is used to index array "
"with shape {1:s}.".format(len(k),
str(self.shape)))
if k.ndim > 1:
raise IndexError("{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim))
if getattr(k, 'dims', (dim, )) != (dim, ):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {0:s} but the target dimension is "
"{1:s}.".format(str(k.dims), dim))
def _broadcast_indexes_outer(self, key):
dims = tuple(k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types))
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.dtype.kind == 'b':
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim
in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (value if isinstance(value, Variable) else
as_variable(value, name=dim))
if variable.dtype.kind == 'b': # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
                    # we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError("Dimensions of indexers mismatch: {}".format(key))
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims))
if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self, key):
"""Return a new Array object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = np.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self, dims, data):
"""Used by IndexVariable to return IndexVariable objects when possible.
"""
return type(self)(dims, data, self._attrs, self._encoding,
fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if isinstance(self._data, dask_array_type):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
chunks_hint = getattr(data, 'chunks', None)
mask = indexing.create_mask(indexer, self.shape, chunks_hint)
data = duck_array_ops.where(mask, fill_value, data)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, 'shape', ()))
if new_order:
data = np.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
'shape mismatch: value array of shape %s could not be '
'broadcast to indexing result with %s dimensions'
% (value.shape, len(dims)))
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim:], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) +
(Ellipsis,)]
value = np.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self):
"""Dictionary of local attributes on this variable.
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable.
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError('encoding must be castable to a dictionary')
def copy(self, deep=True):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
"""
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
if isinstance(data, dask_array_type):
data = data.copy()
elif not isinstance(data, PandasIndexAdapter):
# pandas.Index is immutable
data = np.array(data)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
__hash__ = None
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, 'chunks', None)
_array_counter = itertools.count()
def chunk(self, chunks=None, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
        If chunks are not provided for one or more dimensions, chunk
        sizes along those dimensions will not be updated; non-dask arrays will
        be converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
            already a dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask.array as da
if utils.is_dict_like(chunks):
chunks = dict((self.get_axis_num(dim), chunk)
for dim, chunk in chunks.items())
if chunks is None:
chunks = self.chunks or self.shape
data = self._data
if isinstance(data, da.Array):
data = data.rechunk(chunks)
else:
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s)
for n, s in enumerate(self.shape))
# da.from_array works by using lazily indexing with a tuple of
# slices. Using OuterIndexer is a pragmatic choice: dask does not
# yet handle different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer)
data = da.from_array(data, chunks, name=name, lock=lock)
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
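    # Illustrative usage (not part of the original module), assuming dask is
    # installed:
    #
    #   v = Variable(('x', 'y'), np.ones((100, 100)))
    #   v.chunk({'x': 50})     # dask-backed, chunks ((50, 50), (100,))
    #   v.chunk(25)            # uniform chunk size along every dimension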
def isel(self, **indexers):
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
key = [slice(None)] * self.ndim
for i, dim in enumerate(self.dims):
if dim in indexers:
key[i] = indexers[dim]
return self[tuple(key)]
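    # Illustrative usage (not part of the original module):
    #
    #   v = Variable(('x', 'y'), np.arange(12).reshape(3, 4))
    #   v.isel(x=0)                # drops 'x', result has dims ('y',), shape (4,)
    #   v.isel(y=slice(None, 2))   # keeps both dims, shape (3, 2)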
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel(**{d: 0 for d in dims})
def _shift_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
dtype, fill_value = dtypes.maybe_promote(self.dtype)
shape = list(self.shape)
shape[axis] = min(abs(count), shape[axis])
if isinstance(trimmed_data, dask_array_type):
chunks = list(trimmed_data.chunks)
chunks[axis] = (shape[axis],)
full = functools.partial(da.full, chunks=chunks)
else:
full = np.full
nans = full(shape, fill_value, dtype=dtype)
if count > 0:
arrays = [nans, trimmed_data]
else:
arrays = [trimmed_data, nans]
data = duck_array_ops.concatenate(arrays, axis)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, **shifts):
"""
Return a new Variable with shifted data.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count)
return result
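    # Illustrative usage (not part of the original module):
    #
    #   v = Variable(('x',), np.array([1, 2, 3, 4]))
    #   v.shift(x=1)     # -> [nan, 1, 2, 3]   (dtype promoted to float)
    #   v.shift(x=-2)    # -> [3, 4, nan, nan]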
def pad_with_fill_value(self, fill_value=dtypes.NA, **pad_widths):
"""
Return a new Variable with paddings.
Parameters
----------
        **pad_widths : keyword arguments of the form {dim: (before, after)}
Number of values padded to the edges of each dimension.
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
if isinstance(self.data, dask_array_type):
array = self.data
# Dask does not yet support pad. We manually implement it.
# https://github.com/dask/dask/issues/1926
for d, pad in pad_widths.items():
axis = self.get_axis_num(d)
before_shape = list(array.shape)
before_shape[axis] = pad[0]
before_chunks = list(array.chunks)
before_chunks[axis] = (pad[0], )
after_shape = list(array.shape)
after_shape[axis] = pad[1]
after_chunks = list(array.chunks)
after_chunks[axis] = (pad[1], )
arrays = []
if pad[0] > 0:
arrays.append(da.full(before_shape, fill_value,
dtype=dtype, chunks=before_chunks))
arrays.append(array)
if pad[1] > 0:
arrays.append(da.full(after_shape, fill_value,
dtype=dtype, chunks=after_chunks))
if len(arrays) > 1:
array = da.concatenate(arrays, axis=axis)
else:
pads = [(0, 0) if d not in pad_widths else pad_widths[d]
for d in self.dims]
array = np.pad(self.data.astype(dtype, copy=False), pads,
mode='constant', constant_values=fill_value)
return type(self)(self.dims, array)
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data
for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, **shifts):
"""
        Return a new Variable with rolled data.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims):
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
axes = self.get_axis_num(dims)
if len(dims) < 2: # no need to transpose if only one dimension
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding,
fastpath=True)
def expand_dims(self, *args):
import warnings
warnings.warn('Variable.expand_dims is deprecated: use '
'Variable.set_dims instead', DeprecationWarning,
stacklevel=2)
        return self.set_dims(*args)
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
        This method might be used to attach new dimension(s) to a variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, basestring):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError('new dimensions %r must be a superset of '
'existing dimensions %r' % (dims, self.dims))
self_dims = set(self.dims)
expanded_dims = tuple(
d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[
(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
self._encoding, fastpath=True)
return expanded_var.transpose(*dims)
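    # Sketch: new dimensions are inserted with length 1 unless a shape (or a
    # dict of sizes) is given, in which case the data is broadcast:
    #     v = Variable(('x',), np.arange(3))
    #     v.set_dims(('y', 'x')).shape                 # -> (1, 3)
    #     v.set_dims(('y', 'x'), shape=(2, 3)).shape   # -> (2, 3)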
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError('invalid existing dimensions: %s' % dims)
if new_dim in self.dims:
raise ValueError('cannot create a new dimension with the same '
'name as an existing dimension')
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[:len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[:len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding,
fastpath=True)
def stack(self, **dimensions):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
**dimensions : keyword arguments of the form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
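    # Sketch: stacking collapses the listed dimensions into one trailing
    # dimension in C order:
    #     v = Variable(('x', 'y'), np.arange(6).reshape(2, 3))
    #     v.stack(z=('x', 'y')).dims    # -> ('z',)
    #     v.stack(z=('x', 'y')).shape   # -> (6,)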
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError('invalid existing dimension: %s' % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError('cannot create a new dimension with the same '
'name as an existing dimension')
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError('the product of the new dimension sizes must '
'equal the size of the old dimension')
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[:len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[:len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding,
fastpath=True)
def unstack(self, **dimensions):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
**dimensions : keyword arguments of the form old_dim={dim1: size1, ...}
Names of existing dimensions, and the new dimensions and sizes
that they map to.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
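    # Sketch: unstacking is the inverse of stack; the order of the size mapping
    # sets the order of the new dimensions (OrderedDict used for determinism):
    #     w = Variable(('z',), np.arange(6))
    #     w.unstack(z=OrderedDict([('x', 2), ('y', 3)])).dims    # -> ('x', 'y')
    #     w.unstack(z=OrderedDict([('x', 2), ('y', 3)])).shape   # -> (2, 3)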
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
allow_lazy=False, **kwargs):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
data = func(self.data if allow_lazy else self.values,
axis=axis, **kwargs)
if getattr(data, 'shape', ()) == self.shape:
dims = self.dims
else:
removed_axes = (range(self.ndim) if axis is None
else np.atleast_1d(axis) % self.ndim)
dims = [adim for n, adim in enumerate(self.dims)
if n not in removed_axes]
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
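    # Sketch: reducing over a named dimension drops it from the result; with no
    # dim/axis the reduction is over the flattened data:
    #     v = Variable(('x', 'y'), np.arange(6).reshape(2, 3))
    #     v.reduce(np.sum, dim='y').values   # -> array([ 3, 12])
    #     v.reduce(np.sum).values            # -> array(15)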
@classmethod
def concat(cls, variables, dim='concat_dim', positions=None,
shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to
which to assign each dataset along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
            This option is used internally to speed up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, basestring):
dim, = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(
np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = OrderedDict(first_var.attrs)
encoding = OrderedDict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
return cls(dims, data, attrs, encoding)
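    # Sketch: concatenating along an existing dimension extends it, while a new
    # dimension name stacks the inputs along a new leading axis:
    #     a = Variable(('x',), [1, 2])
    #     b = Variable(('x',), [3, 4])
    #     Variable.concat([a, b], dim='x').shape   # -> (4,)
    #     Variable.concat([a, b], dim='y').dims    # -> ('y', 'x')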
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and
(self._data is other._data or
equiv(self.data, other.data)))
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other):
"""Like equals, but also checks attributes.
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs) and
self.equals(other))
except (TypeError, AttributeError):
return False
def no_conflicts(self, other):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(
other, equiv=duck_array_ops.array_notnull_equiv)
def quantile(self, q, dim=None, interpolation='linear'):
"""Compute the qth quantile of the data along the specified dimension.
        Returns the qth quantile(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
if isinstance(self.data, dask_array_type):
raise TypeError("quantile does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method.")
q = np.asarray(q, dtype=np.float64)
new_dims = list(self.dims)
if dim is not None:
axis = self.get_axis_num(dim)
if utils.is_scalar(dim):
new_dims.remove(dim)
else:
for d in dim:
new_dims.remove(d)
else:
axis = None
new_dims = []
# only add the quantile dimension if q is array like
if q.ndim != 0:
new_dims = ['quantile'] + new_dims
qs = np.nanpercentile(self.data, q * 100., axis=axis,
interpolation=interpolation)
return Variable(new_dims, qs)
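    # Sketch (in-memory data only, since dask-backed arrays raise above):
    #     v = Variable(('x', 'y'), np.arange(6).reshape(2, 3))
    #     v.quantile(0.5, dim='y').values          # -> array([1., 4.])
    #     v.quantile([0.25, 0.75], dim='y').dims   # -> ('quantile', 'x')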
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
if isinstance(self.data, dask_array_type):
raise TypeError("rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method.")
axis = self.get_axis_num(dim)
        func = bn.nanrankdata if self.dtype.kind == 'f' else bn.rankdata
ranked = func(self.data, axis=axis)
if pct:
count = np.sum(~np.isnan(self.data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
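    # Sketch (requires the optional bottleneck dependency to be installed):
    #     v = Variable(('x',), [30, 10, 20])
    #     v.rank('x').values   # -> array([3., 1., 2.])
    # With pct=True the ranks are divided by the count of non-NaN values.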
def rolling_window(self, dim, window, window_dim, center=False,
fill_value=dtypes.NA):
"""
        Make a rolling window along ``dim`` and add ``window_dim`` as the last
        dimension.
Parameters
----------
dim: str
Dimension over which to compute rolling_window
window: int
Window size of the rolling
window_dim: str
New name of the window dimension.
        center: boolean, default False
            If True, pad fill_value at both ends. Otherwise, pad only at the
            beginning of the axis.
        fill_value:
            Value with which to pad.
Returns
-------
        Variable that is a view of the original array with an added dimension
        of size ``window``.
The return dim: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
        >>> v = Variable(('a', 'b'), np.arange(8).reshape((2, 4)))
        >>> v.rolling_window('b', 3, 'window_dim')
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0], [nan, 0, 1], [0, 1, 2], [1, 2, 3]],
[[nan, nan, 4], [nan, 4, 5], [4, 5, 6], [5, 6, 7]]])
        >>> v.rolling_window('b', 3, 'window_dim', center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, nan]],
[[nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
new_dims = self.dims + (window_dim, )
return Variable(new_dims, duck_array_ops.rolling_window(
array, axis=self.get_axis_num(dim), window=window,
center=center, fill_value=fill_value))
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
with np.errstate(all='ignore'):
return self.__array_wrap__(f(self.data, *args, **kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
with np.errstate(all='ignore'):
new_data = (f(self_data, other_data)
if not reflexive
else f(other_data, self_data))
result = Variable(dims, new_data)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError('cannot add a Dataset to a Variable in-place')
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError('dimensions cannot change for in-place '
'operations')
with np.errstate(all='ignore'):
self.values = f(self_data, other_data)
return self
return func
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
    IndexVariable objects preserve loaded values in the form of a pandas.Index
    instead of a NumPy array. Hence, their values are immutable and must always
    be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super(IndexVariable, self).__init__(dims, data, attrs, encoding,
fastpath)
if self.ndim != 1:
raise ValueError('%s objects must be 1-dimensional' %
type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def load(self):
# data is already loaded into memory for IndexVariable
return self
@Variable.data.setter
def data(self, data):
Variable.data.fset(self, data)
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def chunk(self, chunks=None, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, 'ndim', 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs,
self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError('%s values cannot be modified' % type(self).__name__)
@classmethod
def concat(cls, variables, dim='concat_dim', positions=None,
shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, basestring):
dim, = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError('IndexVariable.concat requires that all input '
'variables be IndexVariable objects')
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(
np.concatenate(positions))
data = data.take(indices)
attrs = OrderedDict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of pandas.Index,
which is already immutable. Dimensions, attributes and encodings are
always copied.
"""
return type(self)(self.dims, self._data, self._attrs,
self._encoding, fastpath=True)
def equals(self, other, equiv=None):
        # if equiv is specified, defer to the superclass implementation
if equiv is not None:
return super(IndexVariable, self).equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and
self._data_equals(other))
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, 'to_coord')
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [name or '{}_level_{}'.format(self.dims[0], i)
for i, name in enumerate(index.names)]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError('cannot modify name of IndexVariable in-place')
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, 'Coordinate')
def _unified_dims(variables):
# validate dimensions
all_dims = OrderedDict()
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError('broadcasting cannot handle duplicate '
'dimensions: %r' % list(var_dims))
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError('operands cannot be broadcast together '
'with mismatched lengths for dimension %r: %s'
% (d, (all_dims[d], s)))
return all_dims
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var
for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var
for var in variables)
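# Sketch of broadcast_variables, using the Variable class defined above:
#     x = Variable(('a',), np.arange(2))
#     y = Variable(('b',), np.arange(3))
#     xb, yb = broadcast_variables(x, y)
#     xb.dims, xb.shape   # -> ('a', 'b'), (2, 3)
#     yb.dims, yb.shape   # -> ('a', 'b'), (2, 3)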
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr
in ['dims', 'data', 'shape', 'encoding']):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim='concat_dim', positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
        This option is used internally to speed up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append('%r (%s)' % (n, var_name))
for k, v in level_names.items():
if k in variables:
v.append('(%s)' % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = '\n'.join([', '.join(v) for v in duplicate_names])
raise ValueError('conflicting MultiIndex level name(s):\n%s'
% conflict_str)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/variable.py",
"copies": "1",
"size": "70803",
"license": "apache-2.0",
"hash": 6490055112699620000,
"line_mean": 36.4817363684,
"line_max": 79,
"alpha_frac": 0.5813312995,
"autogenerated": false,
"ratio": 4.396882568465504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5478213867965503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import itertools
import warnings
import numpy as np
from ..core.formatting import format_item
from ..core.pycompat import getargspec
from .utils import (
_determine_cmap_params, _infer_xy_labels, import_matplotlib_pyplot)
# Overrides axes.labelsize, xtick.major.size, ytick.major.size
# from mpl.rcParams
_FONTSIZE = 'small'
# For major ticks on x, y axes
_NTICKS = 5
def _nicetitle(coord, value, maxchar, template):
"""
Put coord, value in template and truncate at maxchar
"""
prettyvalue = format_item(value, quote_strings=False)
title = template.format(coord=coord, value=prettyvalue)
if len(title) > maxchar:
title = title[:(maxchar - 3)] + '...'
return title
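# Sketch (assuming format_item renders plain numbers via str):
#     _nicetitle('time', 5, maxchar=30, template='{coord} = {value}')
#     # -> 'time = 5'; longer titles are truncated to maxchar with '...'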
class FacetGrid(object):
"""
Initialize the matplotlib figure and FacetGrid object.
    The :class:`FacetGrid` is an object that links an xarray DataArray to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
    conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axes : numpy object array
Contains axes in corresponding position, as returned from
plt.subplots
fig : matplotlib.Figure
The figure containing all the axes
name_dicts : numpy object array
Contains dictionaries mapping coordinate names to values. None is
        used as a sentinel value for axes which should remain empty, i.e.
        sometimes the bottom right corner of the grid
"""
def __init__(self, data, col=None, row=None, col_wrap=None,
sharex=True, sharey=True, figsize=None, aspect=1, size=3,
subplot_kws=None):
"""
Parameters
----------
data : DataArray
xarray DataArray to be plotted
row, col : strings
            Dimension names that define subsets of the data, which will be drawn
on separate facets in the grid.
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
sharex : bool, optional
If true, the facets will share x axes
sharey : bool, optional
If true, the facets will share y axes
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
If set, overrides ``size`` and ``aspect``.
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the
width of each facet in inches
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots
"""
plt = import_matplotlib_pyplot()
# Handle corner case of nonunique coordinates
rep_col = col is not None and not data[col].to_index().is_unique
rep_row = row is not None and not data[row].to_index().is_unique
if rep_col or rep_row:
raise ValueError('Coordinates used for faceting cannot '
'contain repeated (nonunique) values.')
# single_group is the grouping variable, if there is exactly one
if col and row:
single_group = False
nrow = len(data[row])
ncol = len(data[col])
nfacet = nrow * ncol
if col_wrap is not None:
warnings.warn('Ignoring col_wrap since both col and row '
'were passed')
elif row and not col:
single_group = row
elif not row and col:
single_group = col
else:
raise ValueError(
'Pass a coordinate name as an argument for row or col')
# Compute grid shape
if single_group:
nfacet = len(data[single_group])
if col:
# idea - could add heuristic for nice shapes like 3x4
ncol = nfacet
if row:
ncol = 1
if col_wrap is not None:
# Overrides previous settings
ncol = col_wrap
nrow = int(np.ceil(nfacet / ncol))
# Set the subplot kwargs
subplot_kws = {} if subplot_kws is None else subplot_kws
if figsize is None:
# Calculate the base figure size with extra horizontal space for a
# colorbar
cbar_space = 1
figsize = (ncol * size * aspect + cbar_space, nrow * size)
fig, axes = plt.subplots(nrow, ncol,
sharex=sharex, sharey=sharey, squeeze=False,
figsize=figsize, subplot_kw=subplot_kws)
# Set up the lists of names for the row and column facet variables
col_names = list(data[col].values) if col else []
row_names = list(data[row].values) if row else []
if single_group:
full = [{single_group: x} for x in
data[single_group].values]
empty = [None for x in range(nrow * ncol - len(full))]
name_dicts = full + empty
else:
rowcols = itertools.product(row_names, col_names)
name_dicts = [{row: r, col: c} for r, c in rowcols]
name_dicts = np.array(name_dicts).reshape(nrow, ncol)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.name_dicts = name_dicts
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
# Next the private variables
self._single_group = single_group
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._col_wrap = col_wrap
self._x_var = None
self._y_var = None
self._cmap_extend = None
self._mappables = []
@property
def _left_axes(self):
return self.axes[:, 0]
@property
def _bottom_axes(self):
return self.axes[-1, :]
def map_dataarray(self, func, x, y, **kwargs):
"""
Apply a plotting function to a 2d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func : callable
A plotting function with the same signature as a 2d xarray
plotting method such as `xarray.plot.imshow`
x, y : string
Names of the coordinates to plot on x, y axes
kwargs :
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
cmapkw = kwargs.get('cmap')
colorskw = kwargs.get('colors')
# colors is mutually exclusive with cmap
if cmapkw and colorskw:
raise ValueError("Can't specify both cmap and colors.")
# These should be consistent with xarray.plot._plot2d
cmap_kwargs = {'plot_data': self.data.values,
# MPL default
'levels': 7 if 'contour' in func.__name__ else None,
'filled': func.__name__ != 'contour',
}
cmap_args = getargspec(_determine_cmap_params).args
cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs)
cmap_params = _determine_cmap_params(**cmap_kwargs)
if colorskw is not None:
cmap_params['cmap'] = None
# Order is important
func_kwargs = kwargs.copy()
func_kwargs.update(cmap_params)
func_kwargs.update({'add_colorbar': False, 'add_labels': False})
# Get x, y labels for the first subplot
x, y = _infer_xy_labels(
darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y,
imshow=func.__name__ == 'imshow', rgb=kwargs.get('rgb', None))
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(subset, x, y, ax=ax, **func_kwargs)
self._mappables.append(mappable)
self._cmap_extend = cmap_params.get('extend')
self._finalize_grid(x, y)
if kwargs.get('add_colorbar', True):
self.add_colorbar()
return self
def _finalize_grid(self, *axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is None:
ax.set_visible(False)
def add_colorbar(self, **kwargs):
"""Draw a colorbar
"""
kwargs = kwargs.copy()
if self._cmap_extend is not None:
kwargs.setdefault('extend', self._cmap_extend)
if getattr(self.data, 'name', None) is not None:
kwargs.setdefault('label', self.data.name)
self.cbar = self.fig.colorbar(self._mappables[-1],
ax=list(self.axes.flat),
**kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_titles(self, template="{coord} = {value}", maxchar=30,
**kwargs):
"""
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for plot titles containing {coord} and {value}
maxchar : int
Truncate titles at maxchar
kwargs : keyword args
additional arguments to matplotlib.text
Returns
-------
self: FacetGrid object
"""
import matplotlib as mpl
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
nicetitle = functools.partial(_nicetitle, maxchar=maxchar,
template=template)
if self._single_group:
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# Only label the ones with data
if d is not None:
coord, value = list(d.items()).pop()
title = nicetitle(coord, value, maxchar=maxchar)
ax.set_title(title, **kwargs)
else:
# The row titles on the right edge of the grid
for ax, row_name in zip(self.axes[:, -1], self.row_names):
title = nicetitle(coord=self._row_var, value=row_name,
maxchar=maxchar)
ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center", **kwargs)
# The column titles on the top row
for ax, col_name in zip(self.axes[0, :], self.col_names):
title = nicetitle(coord=self._col_var, value=col_name,
maxchar=maxchar)
ax.set_title(title, **kwargs)
return self
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS,
fontsize=_FONTSIZE):
"""
Set and control tick behavior
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axes.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(ax.xaxis.get_major_ticks(),
ax.yaxis.get_major_ticks()):
tick.label.set_fontsize(fontsize)
return self
def map(self, func, *args, **kwargs):
"""
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : FacetGrid object
"""
plt = import_matplotlib_pyplot()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is not None:
data = self.data.loc[namedict]
plt.sca(ax)
innerargs = [data[a].values for a in args]
# TODO: is it possible to verify that an artist is mappable?
mappable = func(*innerargs, **kwargs)
self._mappables.append(mappable)
self._finalize_grid(*args[:2])
return self
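    # Rough usage sketch (assumes numpy/xarray imported as np/xr, and that the
    # faceting dimension has coordinate values):
    #     da = xr.DataArray(np.random.rand(4, 5, 6), dims=('time', 'y', 'x'),
    #                       coords={'time': range(4)})
    #     g = FacetGrid(da, col='time', col_wrap=2)
    #     g.map_dataarray(xr.plot.pcolormesh, 'x', 'y')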
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/plot/facetgrid.py",
"copies": "1",
"size": "15298",
"license": "apache-2.0",
"hash": -7728895621259051000,
"line_mean": 34.167816092,
"line_max": 79,
"alpha_frac": 0.5656948621,
"autogenerated": false,
"ratio": 4.276768241543192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 435
} |
from __future__ import absolute_import, division, print_function
import functools
import operator
from collections import Hashable, defaultdict
from datetime import timedelta
import numpy as np
import pandas as pd
from . import duck_array_ops, nputils, utils
from .pycompat import (
dask_array_type, integer_types, iteritems, range, suppress)
from .utils import is_dict_like
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
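# Sketch of the expansion for a 3-dimensional target:
#     expanded_indexer(0, 3)
#     # -> (0, slice(None, None, None), slice(None, None, None))
#     expanded_indexer((0, Ellipsis, 1), 3)
#     # -> (0, slice(None, None, None), 1)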
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def _sanitize_slice_element(x):
from .variable import Variable
from .dataarray import DataArray
if isinstance(x, (Variable, DataArray)):
x = x.values
if isinstance(x, np.ndarray):
if x.ndim != 0:
raise ValueError('cannot use non-scalar arrays in a slice for '
'xarray indexing: {}'.format(x))
x = x[()]
if isinstance(x, np.timedelta64):
# pandas does not support indexing with np.timedelta64 yet:
# https://github.com/pandas-dev/pandas/issues/20393
x = pd.Timedelta(x)
return x
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
def _is_nested_tuple(possible_tuple):
return (isinstance(possible_tuple, tuple) and
any(isinstance(value, (tuple, list, slice))
for value in possible_tuple))
def _index_method_kwargs(method, tolerance):
# backwards compatibility for pandas<0.16 (method) or pandas<0.17
# (tolerance)
kwargs = {}
if method is not None:
kwargs['method'] = method
if tolerance is not None:
kwargs['tolerance'] = tolerance
return kwargs
def get_loc(index, label, method=None, tolerance=None):
kwargs = _index_method_kwargs(method, tolerance)
return index.get_loc(label, **kwargs)
def get_indexer_nd(index, labels, method=None, tolerance=None):
""" Call pd.Index.get_indexer(labels). """
kwargs = _index_method_kwargs(method, tolerance)
flat_labels = np.ravel(labels)
flat_indexer = index.get_indexer(flat_labels, **kwargs)
indexer = flat_indexer.reshape(labels.shape)
return indexer
def convert_label_indexer(index, label, index_name='', method=None,
tolerance=None):
"""Given a pandas.Index and labels (e.g., from __getitem__) for one
dimension, return an indexer suitable for indexing an ndarray along that
dimension. If `index` is a pandas.MultiIndex and depending on `label`,
return a new pandas.Index or pandas.MultiIndex (otherwise return None).
"""
new_index = None
if isinstance(label, slice):
if method is not None or tolerance is not None:
raise NotImplementedError(
'cannot use ``method`` argument if any indexers are '
'slice objects')
indexer = index.slice_indexer(_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step))
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
raise KeyError('cannot represent labeled-based slice indexer for '
'dimension %r with a slice over integer positions; '
'the index is unsorted or non-unique' % index_name)
elif is_dict_like(label):
is_nested_vals = _is_nested_tuple(tuple(label.values()))
if not isinstance(index, pd.MultiIndex):
raise ValueError('cannot use a dict-like object for selection on '
'a dimension that does not have a MultiIndex')
elif len(label) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple((label[k] for k in index.names)))
else:
for k, v in label.items():
# index should be an item (i.e. Hashable) not an array-like
if not isinstance(v, Hashable):
raise ValueError('Vectorized selection is not '
'available along level variable: ' + k)
indexer, new_index = index.get_loc_level(
tuple(label.values()), level=tuple(label.keys()))
elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (label if getattr(label, 'ndim', 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label))
if label.ndim == 0:
if isinstance(index, pd.MultiIndex):
indexer, new_index = index.get_loc_level(label.item(), level=0)
else:
indexer = get_loc(index, label.item(), method, tolerance)
elif label.dtype.kind == 'b':
indexer = label
else:
if isinstance(index, pd.MultiIndex) and label.ndim > 1:
raise ValueError('Vectorized selection is not available along '
'MultiIndex variable: ' + index_name)
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError('not all values found in index %r'
% index_name)
return indexer, new_index
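# Sketch: for a plain (non-Multi) pandas.Index this reduces to pandas lookups:
#     idx = pd.Index([10, 20, 30])
#     convert_label_indexer(idx, 20)             # -> (1, None)
#     convert_label_indexer(idx, slice(10, 20))  # -> (slice(0, 2, None), None)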
def get_dim_indexers(data_obj, indexers):
"""Given a xarray data object and label based indexers, return a mapping
of label indexers with only dimension names as keys.
It groups multiple level indexers given on a multi-index dimension
into a single, dictionary indexer for that dimension (Raise a ValueError
if it is not possible).
"""
invalid = [k for k in indexers
if k not in data_obj.dims and k not in data_obj._level_coords]
if invalid:
raise ValueError("dimensions or multi-index levels %r do not exist"
% invalid)
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in iteritems(indexers):
dim, = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
else:
dim_indexers[key] = label
for dim, level_labels in iteritems(level_indexers):
if dim_indexers.get(dim, False):
raise ValueError("cannot combine multi-index level indexers "
"with an indexer for dimension %s" % dim)
dim_indexers[dim] = level_labels
return dim_indexers
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError('``method`` must be a string')
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in iteritems(dim_indexers):
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError('cannot supply ``method`` or ``tolerance`` '
'when the indexed dimension does not have '
'an associated coordinate.')
pos_indexers[dim] = label
else:
idxr, new_idx = convert_label_indexer(index, label,
dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
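# Sketch: applying [1:5] on top of slice(2, 20, 2) over a size-30 axis
# collapses into one equivalent slice:
#     slice_slice(slice(2, 20, 2), slice(1, 5), size=30)   # -> slice(4, 11, 2)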
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class ExplicitIndexer(object):
"""Base class for explicit indexer objects.
ExplicitIndexer objects wrap a tuple of values given by their ``tuple``
property. These tuples should always have length equal to the number of
dimensions on the indexed array.
    Do not instantiate ExplicitIndexer objects directly: instead, use one of
sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
"""
def __init__(self, key):
if type(self) is ExplicitIndexer: # noqa
raise TypeError('cannot instantiate base ExplicitIndexer objects')
self._key = tuple(key)
@property
def tuple(self):
return self._key
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.tuple)
def as_integer_or_none(value):
return None if value is None else operator.index(value)
def as_integer_slice(value):
start = as_integer_or_none(value.start)
stop = as_integer_or_none(value.stop)
step = as_integer_or_none(value.step)
return slice(start, stop, step)
class BasicIndexer(ExplicitIndexer):
"""Tuple for basic indexing.
All elements should be int or slice objects. Indexing follows NumPy's
rules for basic indexing: each axis is independently sliced and axes
indexed with an integer are dropped from the result.
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(BasicIndexer, self).__init__(new_key)
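# Sketch: only ints and slices are accepted by BasicIndexer:
#     BasicIndexer((1, slice(None)))
#     # -> BasicIndexer((1, slice(None, None, None)))
#     BasicIndexer((np.array([0, 1]),))   # raises TypeError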
class OuterIndexer(ExplicitIndexer):
"""Tuple for outer/orthogonal indexing.
All elements should be int, slice or 1-dimensional np.ndarray objects with
an integer dtype. Indexing is applied independently along each axis, and
axes indexed with an integer are dropped from the result. This type of
indexing works like MATLAB/Fortran.
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError('invalid indexer array, does not have '
'integer dtype: {!r}'.format(k))
if k.ndim != 1:
raise TypeError('invalid indexer array for {}, must have '
                                    'exactly 1 dimension: {!r}'
                                    .format(type(self).__name__, k))
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(OuterIndexer, self).__init__(new_key)
class VectorizedIndexer(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError('invalid indexer array, does not have '
'integer dtype: {!r}'.format(k))
if ndim is None:
ndim = k.ndim
elif ndim != k.ndim:
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError('invalid indexer key: ndarray arguments '
'have different numbers of dimensions: {}'
.format(ndims))
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(VectorizedIndexer, self).__init__(new_key)
class ExplicitlyIndexed(object):
"""Mixin to mark support for Indexer subclasses in indexing."""
class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):
def __array__(self, dtype=None):
key = BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
def __init__(self, array, indexer_cls=BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
return self.array[self.indexer_cls(key)]
class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy.
"""
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : ExplicitIndexer, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if isinstance(array, type(self)) and key is None:
# unwrap
key = array.key
array = array.array
if key is None:
key = BasicIndexer((slice(None),) * array.ndim)
self.array = as_indexable(array)
self.key = key
def _updated_key(self, new_key):
iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
full_key = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, integer_types):
full_key.append(k)
else:
full_key.append(_index_indexer_1d(k, next(iter_new_key), size))
full_key = tuple(full_key)
if all(isinstance(k, integer_types + (slice, )) for k in full_key):
return BasicIndexer(full_key)
return OuterIndexer(full_key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = as_indexable(self.array)
        return np.asarray(array[self.key], dtype=dtype)
def transpose(self, order):
return LazilyVectorizedIndexedArray(
self.array, self.key).transpose(order)
def __getitem__(self, indexer):
if isinstance(indexer, VectorizedIndexer):
array = LazilyVectorizedIndexedArray(self.array, self.key)
return array[indexer]
return type(self)(self.array, self._updated_key(indexer))
def __setitem__(self, key, value):
if isinstance(key, VectorizedIndexer):
raise NotImplementedError(
'Lazy item assignment with the vectorized indexer is not yet '
'implemented. Load your data first by .load() or compute().')
full_key = self._updated_key(key)
self.array[full_key] = value
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make vectorized indexing lazy.
"""
def __init__(self, array, key):
"""
Parameters
----------
array : array_like
Array like object to index.
key : VectorizedIndexer
"""
if isinstance(key, (BasicIndexer, OuterIndexer)):
self.key = _outer_to_vectorized_indexer(key, array.shape)
else:
self.key = _arrayize_vectorized_indexer(key, array.shape)
self.array = as_indexable(array)
@property
def shape(self):
return np.broadcast(*self.key.tuple).shape
def __array__(self, dtype=None):
        return np.asarray(self.array[self.key], dtype=dtype)
def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
# If the indexed array becomes a scalar, return LazilyOuterIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
return LazilyOuterIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
key = VectorizedIndexer(tuple(
k.transpose(order) for k in self.key.tuple))
return type(self)(self.array, key)
def __setitem__(self, key, value):
raise NotImplementedError(
'Lazy item assignment with the vectorized indexer is not yet '
'implemented. Load your data first by .load() or compute().')
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
def _wrap_numpy_scalars(array):
"""Wrap NumPy scalars in 0d arrays."""
if np.isscalar(array):
return np.array(array)
else:
return array
class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):
def __init__(self, array):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self._ensure_copied()
self.array[key] = value
class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):
def __init__(self, array):
self.array = _wrap_numpy_scalars(as_indexable(array))
def _ensure_cached(self):
if not isinstance(self.array, NumpyIndexingAdapter):
self.array = NumpyIndexingAdapter(np.asarray(self.array))
def __array__(self, dtype=None):
self._ensure_cached()
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self.array[key] = value
def as_indexable(array):
"""
    This function always returns an ExplicitlyIndexed subclass,
so that the vectorized indexing is always possible with the returned
object.
"""
if isinstance(array, ExplicitlyIndexed):
return array
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
raise TypeError('Invalid array type: {}'.format(type(array)))
def _outer_to_vectorized_indexer(key, shape):
"""Convert an OuterIndexer into an vectorized indexer.
Parameters
----------
key : Outer/Basic Indexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
VectorizedIndexer
Tuple suitable for use to index a NumPy array with vectorized indexing.
Each element is an array: broadcasting them together gives the shape
of the result.
"""
key = key.tuple
n_dim = len([k for k in key if not isinstance(k, integer_types)])
i_dim = 0
new_key = []
for k, size in zip(key, shape):
if isinstance(k, integer_types):
new_key.append(np.array(k).reshape((1,) * n_dim))
else: # np.ndarray or slice
if isinstance(k, slice):
k = np.arange(*k.indices(size))
assert k.dtype.kind in {'i', 'u'}
shape = [(1,) * i_dim + (k.size, ) +
(1,) * (n_dim - i_dim - 1)]
new_key.append(k.reshape(*shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
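# Sketch: an outer selection of rows [0, 2] and columns [1, 3] from a (5, 5)
# array becomes two broadcastable integer arrays whose outer product covers
# the same 2 x 2 block:
#     key = OuterIndexer((np.array([0, 2]), np.array([1, 3])))
#     vec = _outer_to_vectorized_indexer(key, shape=(5, 5))
#     [k.shape for k in vec.tuple]   # -> [(2, 1), (1, 2)]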
def _outer_to_numpy_indexer(key, shape):
"""Convert an OuterIndexer into an indexer for NumPy.
Parameters
----------
key : Basic/OuterIndexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
tuple
Tuple suitable for use to index a NumPy array.
"""
if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:
# If there is only one vector and all others are slice,
# it can be safely used in mixed basic/advanced indexing.
# Boolean index should already be converted to integer array.
return key.tuple
else:
return _outer_to_vectorized_indexer(key, shape).tuple
def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(tuple(o[new_key.tuple] for o in
np.broadcast_arrays(*old_key.tuple)))
class IndexingSupport(object): # could inherit from enum.Enum on Python 3
# for backends that support only basic indexer
BASIC = 'BASIC'
# for backends that support basic / outer indexer
OUTER = 'OUTER'
# for backends that support outer indexer including at most 1 vector.
OUTER_1VECTOR = 'OUTER_1VECTOR'
# for backends that support full vectorized indexer.
VECTORIZED = 'VECTORIZED'
def decompose_indexer(indexer, shape, indexing_support):
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
if isinstance(indexer, (BasicIndexer, OuterIndexer)):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError('unexpected key type: {}'.format(indexer))
def _decompose_slice(key, size):
""" convert a slice to successive two slices. The first slice always has
a positive step.
"""
start, stop, step = key.indices(size)
if step > 0:
# If key already has a positive step, use it as is in the backend
return key, slice(None)
else:
        # determine stop precisely for the |step| > 1 case,
        # e.g. [98:2:-2] -> [98:3:-2]
stop = start + int((stop - start - 1) / step) * step + 1
start, stop = stop + 1, start + 1
return slice(start, stop, -step), slice(None, None, -1)
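# Illustrative sketch: a negative-step slice over an axis of length 10 is
# split into a positive-step slice for the backend plus a reversal applied to
# the loaded data:
#     _decompose_slice(slice(8, 2, -2), 10)
#     # -> (slice(4, 9, 2), slice(None, None, -1))
# so data[4:9:2][::-1] selects the same elements as data[8:2:-2].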
def _decompose_vectorized_indexer(indexer, shape, indexing_support):
"""
    Decompose a vectorized indexer into two successive indexers: the first
    will be used to index the backend array, while the second is used to
    index the loaded, in-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
    indexing_support: one of the IndexingSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, consider indexing a few elements from a backend array
    with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports outer indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer = []
np_indexer = []
# convert negative indices
indexer = [np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)]
for k, s in zip(indexer, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
else:
            # If it is a (multidimensional) np.ndarray, just pick up the used
            # keys without duplication and store them as a 1d-np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer))
np_indexer = VectorizedIndexer(tuple(np_indexer))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer, np_indexer
def _decompose_outer_indexer(indexer, shape, indexing_support):
"""
    Decompose an outer indexer into two successive indexers: the first
    will be used to index the backend array, while the second is used to
    index the loaded, in-memory np.ndarray.
Parameters
----------
    indexer: OuterIndexer or BasicIndexer
indexing_support: One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, consider indexing a few elements from a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports basic indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
>>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # outer indexing for on-memory np.ndarray.
"""
if indexing_support == IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
assert isinstance(indexer, (OuterIndexer, BasicIndexer))
backend_indexer = []
np_indexer = []
# make indexer positive
pos_indexer = []
for k, s in zip(indexer.tuple, shape):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
        # some backends such as h5py support only one vector in an indexer,
        # so we choose the most efficient axis
gains = [(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray) else 0 for k in indexer]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
for i, (k, s) in enumerate(zip(indexer, shape)):
if isinstance(k, np.ndarray) and i != array_index:
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
# Remove duplicates and sort them in the increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer, shape):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
# Remove duplicates and sort them in the increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer, shape):
if isinstance(k, np.ndarray):
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
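# Illustrative sketch of the OUTER_1VECTOR branch above: for shape (5, 6) and
# the outer key (np.array([2, 0, 2]), np.array([4, 1])), the second axis has
# the larger gain, so it keeps a deduplicated, sorted vector for the backend
# while the first axis is widened to a slice; the returned pair is
#     OuterIndexer((slice(0, 3), np.array([1, 4])))          # backend indexer
#     OuterIndexer((np.array([2, 0, 2]), np.array([1, 0])))  # np indexer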
def _arrayize_vectorized_indexer(indexer, shape):
""" Return an identical vindex but slices are replaced by arrays """
slices = [v for v in indexer.tuple if isinstance(v, slice)]
if len(slices) == 0:
return indexer
arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]
n_dim = arrays[0].ndim if len(arrays) > 0 else 0
i_dim = 0
new_key = []
for v, size in zip(indexer.tuple, shape):
if isinstance(v, np.ndarray):
new_key.append(np.reshape(v, v.shape + (1, ) * len(slices)))
else: # slice
shape = ((1,) * (n_dim + i_dim) + (-1,) +
(1,) * (len(slices) - i_dim - 1))
new_key.append(np.arange(*v.indices(size)).reshape(shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
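# Illustrative sketch: for shape (3, 4), a vectorized key
# (np.array([0, 2]), slice(None)) is arrayized into
# (array([[0], [2]]), array([[0, 1, 2, 3]])), whose broadcast shape is (2, 4).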
def _dask_array_with_chunks_hint(array, chunks):
"""Create a dask array using the chunks hint for dimensions of size > 1."""
import dask.array as da
if len(chunks) < array.ndim:
raise ValueError('not enough chunks in hint')
new_chunks = []
for chunk, size in zip(chunks, array.shape):
new_chunks.append(chunk if size > 1 else (1,))
return da.from_array(array, new_chunks)
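# Illustrative sketch: for an array of shape (1, 6) and chunks hint
# ((4,), (3, 3)), the size-1 dimension is chunked as (1,) while the hint is
# kept for the size-6 dimension, i.e. da.from_array(array, [(1,), (3, 3)]).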
def _logical_any(args):
return functools.reduce(operator.or_, args)
def _masked_result_drop_slice(key, chunks_hint=None):
key = (k for k in key if not isinstance(k, slice))
if chunks_hint is not None:
key = [_dask_array_with_chunks_hint(k, chunks_hint)
if isinstance(k, np.ndarray) else k
for k in key]
return _logical_any(k == -1 for k in key)
def create_mask(indexer, shape, chunks_hint=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
Indexer with -1 in integer or ndarray value to indicate locations in
the result that should be masked.
shape : tuple
Shape of the array being indexed.
chunks_hint : tuple, optional
Optional tuple indicating desired chunks for the result. If provided,
used as a hint for chunks on the resulting dask. Must have a hint for
each dimension on the result array.
Returns
-------
mask : bool, np.ndarray or dask.array.Array with dtype=bool
Dask array if chunks_hint is provided, otherwise a NumPy array. Has the
same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, chunks_hint)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, chunks_hint)
slice_shape = tuple(np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice))
expanded_mask = base_mask[
(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(
expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError('unexpected key type: {}'.format(type(indexer)))
return mask
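# Illustrative sketch: -1 entries in an indexer mark positions to be masked,
# e.g. create_mask(OuterIndexer((np.array([0, -1, 2]),)), (5,)) returns
# array([False, True, False]).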
def _posify_mask_subindexer(index):
"""Convert masked indices in a flat array to the nearest unmasked index.
Parameters
----------
index : np.ndarray
One dimensional ndarray with dtype=int.
Returns
-------
np.ndarray
One dimensional ndarray with all values equal to -1 replaced by an
adjacent non-masked element.
"""
masked = index == -1
unmasked_locs = np.flatnonzero(~masked)
if not unmasked_locs.size:
# indexing unmasked_locs is invalid
return np.zeros_like(index)
masked_locs = np.flatnonzero(masked)
prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
new_index = index.copy()
new_index[masked_locs] = index[unmasked_locs[prev_value]]
return new_index
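# Illustrative sketch: _posify_mask_subindexer(np.array([0, -1, 3, -1]))
# returns array([0, 0, 3, 3]); each -1 is replaced by the value at the
# nearest preceding unmasked location (or the first unmasked one, if the
# mask occurs at the start).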
def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple(_posify_mask_subindexer(k.ravel()).reshape(k.shape)
if isinstance(k, np.ndarray) else k
for k in indexer.tuple)
return type(indexer)(key)
class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a NumPy array to use explicit indexing."""
def __init__(self, array):
        # In NumpyIndexingAdapter we only allow storing a bare np.ndarray
if not isinstance(array, np.ndarray):
raise TypeError('NumpyIndexingAdapter only wraps np.ndarray. '
'Trying to wrap {}'.format(type(array)))
self.array = array
def _ensure_ndarray(self, value):
# We always want the result of indexing to be a NumPy array. If it's
# not, then it really should be a 0d array. Doing the coercion here
# instead of inside variable.as_compatible_data makes it less error
# prone.
if not isinstance(value, np.ndarray):
value = utils.to_0d_array(value)
return value
def _indexing_array_and_key(self, key):
if isinstance(key, OuterIndexer):
array = self.array
key = _outer_to_numpy_indexer(key, self.array.shape)
elif isinstance(key, VectorizedIndexer):
array = nputils.NumpyVIndexAdapter(self.array)
key = key.tuple
elif isinstance(key, BasicIndexer):
array = self.array
key = key.tuple
else:
raise TypeError('unexpected key type: {}'.format(type(key)))
return array, key
def transpose(self, order):
return self.array.transpose(order)
def __getitem__(self, key):
array, key = self._indexing_array_and_key(key)
return self._ensure_ndarray(array[key])
def __setitem__(self, key, value):
array, key = self._indexing_array_and_key(key)
array[key] = value
class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a dask array to support explicit indexing."""
def __init__(self, array):
""" This adapter is created in Variable.__getitem__ in
Variable._broadcast_indexes.
"""
self.array = array
def __getitem__(self, key):
if isinstance(key, BasicIndexer):
return self.array[key.tuple]
elif isinstance(key, VectorizedIndexer):
return self.array.vindex[key.tuple]
else:
assert isinstance(key, OuterIndexer)
key = key.tuple
try:
return self.array[key]
except NotImplementedError:
# manual orthogonal indexing.
# TODO: port this upstream into dask in a saner way.
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
return value
def __setitem__(self, key, value):
raise TypeError("this variable's data is stored in a dask array, "
'which does not support item assignment. To '
'assign to this variable, you must first load it '
'into memory explicitly using the .load() '
'method or accessing its .values attribute.')
def transpose(self, order):
return self.array.transpose(order)
class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing."""
def __init__(self, array, dtype=None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype = np.dtype('O')
elif hasattr(array, 'categories'):
# category isn't a real numpy dtype
dtype = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype = np.dtype('O')
else:
dtype = array.dtype
self._dtype = dtype
@property
def dtype(self):
return self._dtype
def __array__(self, dtype=None):
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype('object')
return np.asarray(array.values, dtype=dtype)
@property
def shape(self):
# .shape is broken on pandas prior to v0.15.2
return (len(self.array),)
def __getitem__(self, indexer):
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
key, = key
if getattr(key, 'ndim', 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = PandasIndexAdapter(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
                # note: it would probably be better in general to return
                # pd.Timestamp rather than np.datetime64, but this is easier
                # (for now)
result = np.datetime64('NaT', 'ns')
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, 'value', result), 'ns')
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order):
        return self.array  # self.array should always be one-dimensional
def __repr__(self):
return ('%s(array=%r, dtype=%r)'
% (type(self).__name__, self.array, self.dtype))
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/indexing.py",
"copies": "1",
"size": "46534",
"license": "apache-2.0",
"hash": 3760284252601153000,
"line_mean": 35.75671406,
"line_max": 86,
"alpha_frac": 0.6060299996,
"autogenerated": false,
"ratio": 4.099550700378821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205580699978821,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import operator
import warnings
from collections import defaultdict
import numpy as np
from . import utils
from .indexing import get_indexer_nd
from .pycompat import OrderedDict, iteritems, suppress
from .utils import is_dict_like, is_full_slice
from .variable import IndexVariable
def _get_joiner(join):
if join == 'outer':
return functools.partial(functools.reduce, operator.or_)
elif join == 'inner':
return functools.partial(functools.reduce, operator.and_)
elif join == 'left':
return operator.itemgetter(0)
elif join == 'right':
return operator.itemgetter(-1)
elif join == 'exact':
# We cannot return a function to "align" in this case, because it needs
# access to the dimension name to give a good error message.
return None
else:
raise ValueError('invalid value for join: %s' % join)
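# Illustrative sketch (relies on pandas.Index supporting | and & as set
# operations): with indexes pd.Index([0, 1, 2]) and pd.Index([1, 2, 3]),
# _get_joiner('outer') reduces with operator.or_ to the union
# Index([0, 1, 2, 3]), while _get_joiner('inner') reduces with operator.and_
# to the intersection Index([1, 2]).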
_DEFAULT_EXCLUDE = frozenset()
def align(*objects, **kwargs):
"""align(*objects, join='inner', copy=True, indexes=None,
exclude=frozenset())
Given any number of Dataset and/or DataArray objects, returns new
objects with aligned indexes and dimension sizes.
Array from the aligned objects are suitable as input to mathematical
operators, because along each dimension they have the same index and size.
Missing values (if ``join != 'inner'``) are filled with NaN.
Parameters
----------
*objects : Dataset or DataArray
Objects to align.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed with
only slice operations, then the output may share memory with the input.
In either case, new xarray objects are always returned.
exclude : sequence of str, optional
Dimensions that must be excluded from alignment
indexes : dict-like, optional
Any indexes explicitly provided with the `indexes` argument should be
used in preference to the aligned indexes.
Returns
-------
aligned : same as *objects
Tuple of objects with aligned coordinates.
Raises
------
ValueError
If any dimensions without labels on the arguments have different sizes,
or a different size than the size of the aligned dimension labels.
"""
join = kwargs.pop('join', 'inner')
copy = kwargs.pop('copy', True)
indexes = kwargs.pop('indexes', None)
exclude = kwargs.pop('exclude', _DEFAULT_EXCLUDE)
if indexes is None:
indexes = {}
if kwargs:
raise TypeError('align() got unexpected keyword arguments: %s'
% list(kwargs))
if not indexes and len(objects) == 1:
# fast path for the trivial case
obj, = objects
return (obj.copy(deep=copy),)
all_indexes = defaultdict(list)
unlabeled_dim_sizes = defaultdict(set)
for obj in objects:
for dim in obj.dims:
if dim not in exclude:
try:
index = obj.indexes[dim]
except KeyError:
unlabeled_dim_sizes[dim].add(obj.sizes[dim])
else:
all_indexes[dim].append(index)
# We don't reindex over dimensions with all equal indexes for two reasons:
# - It's faster for the usual case (already aligned objects).
# - It ensures it's possible to do operations that don't require alignment
# on indexes with duplicate values (which cannot be reindexed with
# pandas). This is useful, e.g., for overwriting such duplicate indexes.
joiner = _get_joiner(join)
joined_indexes = {}
for dim, matching_indexes in iteritems(all_indexes):
if dim in indexes:
index = utils.safe_cast_to_index(indexes[dim])
if (any(not index.equals(other) for other in matching_indexes) or
dim in unlabeled_dim_sizes):
joined_indexes[dim] = index
else:
if (any(not matching_indexes[0].equals(other)
for other in matching_indexes[1:]) or
dim in unlabeled_dim_sizes):
if join == 'exact':
raise ValueError(
'indexes along dimension {!r} are not equal'
.format(dim))
index = joiner(matching_indexes)
joined_indexes[dim] = index
else:
index = matching_indexes[0]
if dim in unlabeled_dim_sizes:
unlabeled_sizes = unlabeled_dim_sizes[dim]
labeled_size = index.size
if len(unlabeled_sizes | {labeled_size}) > 1:
raise ValueError(
'arguments without labels along dimension %r cannot be '
'aligned because they have different dimension size(s) %r '
'than the size of the aligned dimension labels: %r'
% (dim, unlabeled_sizes, labeled_size))
for dim in unlabeled_dim_sizes:
if dim not in all_indexes:
sizes = unlabeled_dim_sizes[dim]
if len(sizes) > 1:
raise ValueError(
'arguments without labels along dimension %r cannot be '
'aligned because they have different dimension sizes: %r'
% (dim, sizes))
result = []
for obj in objects:
valid_indexers = {k: v for k, v in joined_indexes.items()
if k in obj.dims}
if not valid_indexers:
# fast path for no reindexing necessary
new_obj = obj.copy(deep=copy)
else:
new_obj = obj.reindex(copy=copy, **valid_indexers)
new_obj.encoding = obj.encoding
result.append(new_obj)
return tuple(result)
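# Illustrative sketch of align: given
#     a = xr.DataArray([1, 2, 3], dims='x', coords={'x': [0, 1, 2]})
#     b = xr.DataArray([10, 20, 30], dims='x', coords={'x': [1, 2, 3]})
# align(a, b) with the default join='inner' returns copies restricted to the
# shared labels x=[1, 2], while join='outer' reindexes both onto
# x=[0, 1, 2, 3] and fills the missing positions with NaN.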
def deep_align(objects, join='inner', copy=True, indexes=None,
exclude=frozenset(), raise_on_invalid=True):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
if indexes is None:
indexes = {}
def is_alignable(obj):
return hasattr(obj, 'indexes') and hasattr(obj, 'reindex')
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for n, variables in enumerate(objects):
if is_alignable(variables):
positions.append(n)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
                    # should be overwritten instead:
# https://github.com/pydata/xarray/issues/725
positions.append(n)
keys.append(k)
targets.append(v)
out.append(OrderedDict(variables))
elif raise_on_invalid:
raise ValueError('object to align is neither an xarray.Dataset, '
'an xarray.DataArray nor a dictionary: %r'
% variables)
else:
out.append(variables)
aligned = align(*targets, join=join, copy=copy, indexes=indexes,
exclude=exclude)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
assert all(arg is not not_replaced for arg in out)
return out
def reindex_like_indexers(target, other):
"""Extract indexers to align target with other.
Not public API.
Parameters
----------
target : Dataset or DataArray
Object to be aligned.
other : Dataset or DataArray
Object to be aligned with.
Returns
-------
Dict[Any, pandas.Index] providing indexes for reindex keyword arguments.
Raises
------
ValueError
If any dimensions without labels have different sizes.
"""
indexers = {k: v for k, v in other.indexes.items() if k in target.dims}
for dim in other.dims:
if dim not in indexers and dim in target.dims:
other_size = other.sizes[dim]
target_size = target.sizes[dim]
if other_size != target_size:
raise ValueError('different size for unlabeled '
'dimension on argument %r: %r vs %r'
% (dim, other_size, target_size))
return indexers
def reindex_variables(variables, sizes, indexes, indexers, method=None,
tolerance=None, copy=True):
"""Conform a dictionary of aligned variables onto a new set of variables,
filling in missing values with NaN.
Not public API.
Parameters
----------
variables : dict-like
Dictionary of xarray.Variable objects.
sizes : dict-like
Dictionary from dimension names to integer sizes.
indexes : dict-like
Dictionary of xarray.IndexVariable objects associated with variables.
indexers : dict
Dictionary with keys given by dimension names and values given by
        arrays of coordinate tick labels. Any mismatched coordinate values
        will be filled in with NaN, and any mismatched dimension names will
        simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact matches.
        The values of the index at the matching locations must satisfy the
equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, new xarray objects are always returned.
Returns
-------
reindexed : OrderedDict
        Another dict with the items in ``variables``, but with replaced indexes.
"""
from .dataarray import DataArray
# build up indexers for assignment along each dimension
int_indexers = {}
targets = {}
masked_dims = set()
unchanged_dims = set()
# size of reindexed dimensions
new_sizes = {}
for name, index in iteritems(indexes):
if name in indexers:
if not index.is_unique:
raise ValueError(
'cannot reindex or align along dimension %r because the '
'index has duplicate values' % name)
target = utils.safe_cast_to_index(indexers[name])
new_sizes[name] = len(target)
int_indexer = get_indexer_nd(index, target, method, tolerance)
            # We use negative values from get_indexer_nd to signify
            # values that are missing in the index.
if (int_indexer < 0).any():
masked_dims.add(name)
elif np.array_equal(int_indexer, np.arange(len(index))):
unchanged_dims.add(name)
int_indexers[name] = int_indexer
targets[name] = target
for dim in sizes:
if dim not in indexes and dim in indexers:
existing_size = sizes[dim]
new_size = indexers[dim].size
if existing_size != new_size:
raise ValueError(
'cannot reindex or align along dimension %r without an '
'index because its size %r is different from the size of '
'the new index %r' % (dim, existing_size, new_size))
# create variables for the new dataset
reindexed = OrderedDict()
for dim, indexer in indexers.items():
if isinstance(indexer, DataArray) and indexer.dims != (dim,):
warnings.warn(
"Indexer has dimensions {0:s} that are different "
"from that to be indexed along {1:s}. "
"This will behave differently in the future.".format(
str(indexer.dims), dim),
FutureWarning, stacklevel=3)
if dim in variables:
var = variables[dim]
args = (var.attrs, var.encoding)
else:
args = ()
reindexed[dim] = IndexVariable((dim,), indexers[dim], *args)
for name, var in iteritems(variables):
if name not in indexers:
key = tuple(slice(None)
if d in unchanged_dims
else int_indexers.get(d, slice(None))
for d in var.dims)
needs_masking = any(d in masked_dims for d in var.dims)
if needs_masking:
new_var = var._getitem_with_mask(key)
elif all(is_full_slice(k) for k in key):
# no reindexing necessary
# here we need to manually deal with copying data, since
# we neither created a new ndarray nor used fancy indexing
new_var = var.copy(deep=copy)
else:
new_var = var[key]
reindexed[name] = new_var
return reindexed
def broadcast(*args, **kwargs):
"""Explicitly broadcast any number of DataArray or Dataset objects against
one another.
xarray objects automatically broadcast against each other in arithmetic
operations, so this function should not be necessary for normal use.
If no change is needed, the input data is returned to the output without
being copied.
Parameters
----------
*args : DataArray or Dataset objects
Arrays to broadcast against each other.
exclude : sequence of str, optional
        Dimensions that must not be broadcast
Returns
-------
broadcast : tuple of xarray objects
The same data as the input arrays, but with additional dimensions
inserted so that all data arrays have the same dimensions and shape.
Examples
--------
Broadcast two data arrays against one another to fill out their dimensions:
>>> a = xr.DataArray([1, 2, 3], dims='x')
>>> b = xr.DataArray([5, 6], dims='y')
>>> a
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) int64 0 1 2
>>> b
<xarray.DataArray (y: 2)>
array([5, 6])
Coordinates:
* y (y) int64 0 1
>>> a2, b2 = xr.broadcast(a, b)
>>> a2
<xarray.DataArray (x: 3, y: 2)>
array([[1, 1],
[2, 2],
[3, 3]])
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
>>> b2
<xarray.DataArray (x: 3, y: 2)>
array([[5, 6],
[5, 6],
[5, 6]])
Coordinates:
* y (y) int64 0 1
* x (x) int64 0 1 2
Fill out the dimensions of all data variables in a dataset:
>>> ds = xr.Dataset({'a': a, 'b': b})
>>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset
>>> ds2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
Data variables:
a (x, y) int64 1 1 2 2 3 3
b (x, y) int64 5 6 5 6 5 6
"""
from .dataarray import DataArray
from .dataset import Dataset
exclude = kwargs.pop('exclude', None)
if exclude is None:
exclude = set()
if kwargs:
raise TypeError('broadcast() got unexpected keyword arguments: %s'
% list(kwargs))
args = align(*args, join='outer', copy=False, exclude=exclude)
common_coords = OrderedDict()
dims_map = OrderedDict()
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.coords:
common_coords[dim] = arg.coords[dim].variable
def _set_dims(var):
# Add excluded dims to a copy of dims_map
var_dims_map = dims_map.copy()
for dim in exclude:
with suppress(ValueError):
# ignore dim not in var.dims
var_dims_map[dim] = var.shape[var.dims.index(dim)]
return var.set_dims(var_dims_map)
def _broadcast_array(array):
data = _set_dims(array.variable)
coords = OrderedDict(array.coords)
coords.update(common_coords)
return DataArray(data, coords, data.dims, name=array.name,
attrs=array.attrs, encoding=array.encoding)
def _broadcast_dataset(ds):
data_vars = OrderedDict(
(k, _set_dims(ds.variables[k]))
for k in ds.data_vars)
coords = OrderedDict(ds.coords)
coords.update(common_coords)
return Dataset(data_vars, coords, ds.attrs)
result = []
for arg in args:
if isinstance(arg, DataArray):
result.append(_broadcast_array(arg))
elif isinstance(arg, Dataset):
result.append(_broadcast_dataset(arg))
else:
raise ValueError('all input must be Dataset or DataArray objects')
return tuple(result)
def broadcast_arrays(*args):
import warnings
warnings.warn('xarray.broadcast_arrays is deprecated: use '
'xarray.broadcast instead', DeprecationWarning, stacklevel=2)
return broadcast(*args)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/alignment.py",
"copies": "1",
"size": "18616",
"license": "apache-2.0",
"hash": 5063458242454570000,
"line_mean": 34.662835249,
"line_max": 79,
"alpha_frac": 0.5881499785,
"autogenerated": false,
"ratio": 4.37406015037594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 522
} |
from __future__ import absolute_import, division, print_function
import functools
import operator
import warnings
from distutils.version import LooseVersion
import numpy as np
from .. import Variable, coding
from ..coding.variables import pop_to
from ..core import indexing
from ..core.pycompat import (
PY3, OrderedDict, basestring, iteritems, suppress)
from ..core.utils import FrozenOrderedDict, close_on_error, is_remote_uri
from .common import (
HDF5_LOCK, BackendArray, DataStorePickleMixin, WritableCFDataStore,
find_root, robust_getitem)
from .netcdf3 import encode_nc3_attr_value, encode_nc3_variable
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {'=': 'native',
'>': 'big',
'<': 'little',
'|': 'native'}
class BaseNetCDF4Array(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype('O')
self.dtype = dtype
def __setitem__(self, key, value):
with self.datastore.ensure_open(autoclose=True):
data = self.get_array()
data[key] = value
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name]
class NetCDF4ArrayWrapper(BaseNetCDF4Array):
def __getitem__(self, key):
key, np_inds = indexing.decompose_indexer(
key, self.shape, indexing.IndexingSupport.OUTER)
if self.datastore.is_remote: # pragma: no cover
getitem = functools.partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
with self.datastore.ensure_open(autoclose=True):
try:
array = getitem(self.get_array(), key.tuple)
except IndexError:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
msg = ('The indexing operation you are attempting to perform '
                       'is not valid on a netCDF4.Variable object. Try loading '
'your data into memory first by calling .load().')
if not PY3:
import traceback
msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
raise IndexError(msg)
if len(np_inds.tuple) > 0:
array = indexing.NumpyIndexingAdapter(array)[np_inds]
return array
def _encode_nc4_variable(var):
for coder in [coding.strings.EncodedStringCoder(allows_unicode=True),
coding.strings.CharacterArrayCoder()]:
var = coder.encode(var)
return var
def _get_datatype(var, nc_format='NETCDF4'):
if nc_format == 'NETCDF4':
datatype = _nc4_dtype(var)
else:
datatype = var.dtype
return datatype
def _nc4_dtype(var):
if coding.strings.is_unicode_dtype(var.dtype):
dtype = str
elif var.dtype.kind in ['i', 'u', 'f', 'c', 'S']:
dtype = var.dtype
else:
raise ValueError('unsupported dtype for netCDF4 variable: {}'
.format(var.dtype))
return dtype
def _netcdf4_create_group(dataset, name):
return dataset.createGroup(name)
def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
if group in set([None, '', '/']):
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != 'r':
ds = create_group(ds, key)
else:
# wrap error to provide slightly more helpful message
raise IOError('group not found: %s' % key, e)
return ds
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == 'S' and '_FillValue' in attributes:
attributes['_FillValue'] = np.string_(attributes['_FillValue'])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ['=', '|']:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder('='))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop('endian', None)
    # check that if the encoding specifies an endianness, it is 'native'
    if var.encoding.get('endian', 'native') != 'native':
raise NotImplementedError("Attempt to write non-native endian type, "
"this is not supported by the netCDF4 "
"python library.")
return var
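# Illustrative sketch: a variable whose dtype byteorder is '>' (big-endian on
# a little-endian machine) is cast with dtype.newbyteorder('=') to the native
# equivalent, and any 'endian' entry is dropped from its encoding, before the
# data is handed to netCDF4.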
def _extract_nc4_variable_encoding(variable, raise_on_invalid=False,
lsd_okay=True, h5py_okay=False,
backend='netCDF4', unlimited_dims=None):
if unlimited_dims is None:
unlimited_dims = ()
encoding = variable.encoding.copy()
safe_to_drop = set(['source', 'original_shape'])
valid_encodings = set(['zlib', 'complevel', 'fletcher32', 'contiguous',
'chunksizes', 'shuffle', '_FillValue'])
if lsd_okay:
valid_encodings.add('least_significant_digit')
if h5py_okay:
valid_encodings.add('compression')
valid_encodings.add('compression_opts')
if not raise_on_invalid and encoding.get('chunksizes') is not None:
# It's possible to get encoded chunksizes larger than a dimension size
# if the original file had an unlimited dimension. This is problematic
# if the new file no longer has an unlimited dimension.
chunksizes = encoding['chunksizes']
chunks_too_big = any(
c > d and dim not in unlimited_dims
for c, d, dim in zip(chunksizes, variable.shape, variable.dims))
changed_shape = encoding.get('original_shape') != variable.shape
if chunks_too_big or changed_shape:
del encoding['chunksizes']
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError('unexpected encoding parameters for %r backend: '
' %r' % (backend, invalid))
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
return encoding
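# Illustrative sketch (using the Variable(dims, data, attrs, encoding) form
# seen above): for
#     v = Variable(('x',), np.arange(3), {}, {'zlib': True, 'foo': 1})
# _extract_nc4_variable_encoding(v) silently drops the unrecognised 'foo' and
# returns {'zlib': True}, whereas raise_on_invalid=True raises ValueError.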
def _open_netcdf4_group(filename, mode, group=None, **kwargs):
import netCDF4 as nc4
ds = nc4.Dataset(filename, mode=mode, **kwargs)
with close_on_error(ds):
ds = _nc4_require_group(ds, group, mode)
_disable_auto_decode_group(ds)
return ds
def _disable_auto_decode_variable(var):
"""Disable automatic decoding on a netCDF4.Variable.
We handle these types of decoding ourselves.
"""
var.set_auto_maskandscale(False)
# only added in netCDF4-python v1.2.8
with suppress(AttributeError):
var.set_auto_chartostring(False)
def _disable_auto_decode_group(ds):
"""Disable automatic decoding on all variables in a netCDF4.Group."""
for var in ds.variables.values():
_disable_auto_decode_variable(var)
def _is_list_of_strings(value):
if (np.asarray(value).dtype.kind in ['U', 'S'] and
np.asarray(value).size > 1):
return True
else:
return False
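# Illustrative sketch: _is_list_of_strings(['a', 'b']) is True (string dtype,
# size > 1), while _is_list_of_strings('abc') and _is_list_of_strings([1, 2])
# are False, so only genuine lists of strings take the NC_STRING path below.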
def _set_nc_attribute(obj, key, value):
if _is_list_of_strings(value):
# encode as NC_STRING if attr is list of strings
try:
obj.setncattr_string(key, value)
except AttributeError:
            # Inform users with an old netCDF library that does not
            # support NC_STRING that we can't serialize lists of
            # strings as attrs
msg = ('Attributes which are lists of strings are not '
'supported with this version of netCDF. Please '
'upgrade to netCDF4-python 1.2.4 or greater.')
raise AttributeError(msg)
else:
obj.setncattr(key, value)
class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
autoclose=False, lock=HDF5_LOCK):
if autoclose and opener is None:
raise ValueError('autoclose requires an opener')
_disable_auto_decode_group(netcdf4_dataset)
self._ds = netcdf4_dataset
self._autoclose = autoclose
self._isopen = True
self.format = self.ds.data_model
self._filename = self.ds.filepath()
self.is_remote = is_remote_uri(self._filename)
self._mode = mode = 'a' if mode == 'w' else mode
if opener:
self._opener = functools.partial(opener, mode=self._mode)
else:
self._opener = opener
super(NetCDF4DataStore, self).__init__(writer, lock=lock)
@classmethod
def open(cls, filename, mode='r', format='NETCDF4', group=None,
writer=None, clobber=True, diskless=False, persist=False,
autoclose=False, lock=HDF5_LOCK):
import netCDF4 as nc4
if (len(filename) == 88 and
LooseVersion(nc4.__version__) < "1.3.1"):
warnings.warn(
'A segmentation fault may occur when the '
'file path has exactly 88 characters as it does '
'in this case. The issue is known to occur with '
'version 1.2.4 of netCDF4 and can be addressed by '
'upgrading netCDF4 to at least version 1.3.1. '
'More details can be found here: '
'https://github.com/pydata/xarray/issues/1745')
if format is None:
format = 'NETCDF4'
opener = functools.partial(_open_netcdf4_group, filename, mode=mode,
group=group, clobber=clobber,
diskless=diskless, persist=persist,
format=format)
ds = opener()
return cls(ds, mode=mode, writer=writer, opener=opener,
autoclose=autoclose, lock=lock)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
dimensions = var.dimensions
data = indexing.LazilyOuterIndexedArray(
NetCDF4ArrayWrapper(name, self))
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == 'contiguous':
encoding['contiguous'] = True
encoding['chunksizes'] = None
else:
encoding['contiguous'] = False
encoding['chunksizes'] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, 'least_significant_digit')
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
encoding['original_shape'] = var.shape
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
with self.ensure_open(autoclose=False):
dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in
iteritems(self.ds.variables))
return dsvars
def get_attrs(self):
with self.ensure_open(autoclose=True):
attrs = FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
return attrs
def get_dimensions(self):
with self.ensure_open(autoclose=True):
dims = FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
return dims
def get_encoding(self):
with self.ensure_open(autoclose=True):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v.isunlimited()}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
with self.ensure_open(autoclose=False):
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, size=dim_length)
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
if self.format != 'NETCDF4':
value = encode_nc3_attr_value(value)
_set_nc_attribute(self.ds, key, value)
def set_variables(self, *args, **kwargs):
with self.ensure_open(autoclose=False):
super(NetCDF4DataStore, self).set_variables(*args, **kwargs)
def encode_variable(self, variable):
variable = _force_native_endianness(variable)
if self.format == 'NETCDF4':
variable = _encode_nc4_variable(variable)
else:
variable = encode_nc3_variable(variable)
return variable
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
datatype = _get_datatype(variable, self.format)
attrs = variable.attrs.copy()
fill_value = attrs.pop('_FillValue', None)
if datatype is str and fill_value is not None:
raise NotImplementedError(
'netCDF4 does not yet support setting a fill value for '
'variable-length strings '
'(https://github.com/Unidata/netcdf4-python/issues/730). '
"Either remove '_FillValue' from encoding on variable %r "
"or set {'dtype': 'S1'} in encoding to use the fixed width "
'NC_CHAR type.' % name)
encoding = _extract_nc4_variable_encoding(
variable, raise_on_invalid=check_encoding,
unlimited_dims=unlimited_dims)
if name in self.ds.variables:
nc4_var = self.ds.variables[name]
else:
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get('zlib', False),
complevel=encoding.get('complevel', 4),
shuffle=encoding.get('shuffle', True),
fletcher32=encoding.get('fletcher32', False),
contiguous=encoding.get('contiguous', False),
chunksizes=encoding.get('chunksizes'),
endian='native',
least_significant_digit=encoding.get(
'least_significant_digit'),
fill_value=fill_value)
_disable_auto_decode_variable(nc4_var)
for k, v in iteritems(attrs):
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
_set_nc_attribute(nc4_var, k, v)
target = NetCDF4ArrayWrapper(name, self)
return target, variable.data
def sync(self, compute=True):
with self.ensure_open(autoclose=True):
super(NetCDF4DataStore, self).sync(compute=compute)
self.ds.sync()
def close(self):
if self._isopen:
# netCDF4 only allows closing the root group
ds = find_root(self.ds)
if ds._isopen:
ds.close()
self._isopen = False
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/netCDF4_.py",
"copies": "1",
"size": "17244",
"license": "apache-2.0",
"hash": -4800416034808279000,
"line_mean": 36.7330415755,
"line_max": 79,
"alpha_frac": 0.5845511482,
"autogenerated": false,
"ratio": 4.118461905899212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203013054099211,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import functools
import six
from django.conf import settings
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.response import Response
from sentry import features
from sentry.api.bases import OrganizationEventsEndpointBase, OrganizationEventPermission
from sentry.api.helpers.group_index import (
build_query_params_from_request,
calculate_stats_period,
delete_groups,
get_by_short_id,
rate_limit_endpoint,
update_groups,
ValidationError,
)
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializerSnuba
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.models import Group, GroupStatus
from sentry.search.snuba.backend import EventsDatasetSnubaSearchBackend
from sentry.snuba import discover
from sentry.utils.validators import normalize_event_id
from sentry.utils.compat import map
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
search = EventsDatasetSnubaSearchBackend(**settings.SENTRY_SEARCH_OPTIONS)
class OrganizationGroupIndexEndpoint(OrganizationEventsEndpointBase):
permission_classes = (OrganizationEventPermission,)
skip_snuba_fields = {
"query",
"status",
"bookmarked_by",
"assigned_to",
"unassigned",
"linked",
"subscribed_by",
"active_at",
"first_release",
"first_seen",
}
def _search(self, request, organization, projects, environments, extra_query_kwargs=None):
query_kwargs = build_query_params_from_request(
request, organization, projects, environments
)
if extra_query_kwargs is not None:
assert "environment" not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
query_kwargs["environments"] = environments if environments else None
result = search.query(**query_kwargs)
return result, query_kwargs
@rate_limit_endpoint(limit=10, window=1)
def get(self, request, organization):
"""
List an Organization's Issues
`````````````````````````````
Return a list of issues (groups) bound to an organization. All parameters are
supplied as query string parameters.
        A default query of ``is:unresolved`` is applied. To return results
        with other statuses, send a new query value (i.e. ``?query=`` for all
        results).
The ``groupStatsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
The ``statsPeriod`` parameter can be used to select a date window starting
from now. Ex. ``14d``.
The ``start`` and ``end`` parameters can be used to select an absolute
date period to fetch issues from.
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string groupStatsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string start: Beginning date. You must also provide ``end``.
:qparam string end: End date. You must also provide ``start``.
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this function as well. This
                                    can cause the function to return an issue
                                    of a different project, which is why this
                                    is an opt-in.
Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided an implied
``"is:unresolved"`` is assumed.)
:pparam string organization_slug: the slug of the organization the
issues belong to.
:auth: required
:qparam list expand: an optional list of strings to opt in to additional data. Supports `inbox`
:qparam list collapse: an optional list of strings to opt out of certain pieces of data. Supports `stats`, `lifetime`, `base`
"""
stats_period = request.GET.get("groupStatsPeriod")
try:
start, end = get_date_range_from_params(request.GET)
except InvalidParams as e:
raise ParseError(detail=six.text_type(e))
expand = request.GET.getlist("expand", [])
collapse = request.GET.getlist("collapse", [])
has_inbox = features.has("organizations:inbox", organization, actor=request.user)
has_workflow_owners = features.has(
"organizations:workflow-owners", organization, actor=request.user
)
if stats_period not in (None, "", "24h", "14d", "auto"):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
stats_period, stats_period_start, stats_period_end = calculate_stats_period(
stats_period, start, end
)
environments = self.get_environments(request, organization)
serializer = functools.partial(
StreamGroupSerializerSnuba,
environment_ids=[env.id for env in environments],
stats_period=stats_period,
stats_period_start=stats_period_start,
stats_period_end=stats_period_end,
expand=expand,
collapse=collapse,
has_inbox=has_inbox,
has_workflow_owners=has_workflow_owners,
)
projects = self.get_projects(request, organization)
project_ids = [p.id for p in projects]
if not projects:
return Response([])
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
# we ignore date range for both short id and event ids
query = request.GET.get("query", "").strip()
if query:
# check to see if we've got an event ID
event_id = normalize_event_id(query)
if event_id:
# For a direct hit lookup we want to use any passed project ids
# (we've already checked permissions on these) plus any other
# projects that the user is a member of. This gives us a better
# chance of returning the correct result, even if the wrong
# project is selected.
direct_hit_projects = set(project_ids) | set(
[project.id for project in request.access.projects]
)
groups = list(Group.objects.filter_by_event_id(direct_hit_projects, event_id))
if len(groups) == 1:
response = Response(
serialize(groups, request.user, serializer(matching_event_id=event_id))
)
response["X-Sentry-Direct-Hit"] = "1"
return response
if groups:
return Response(serialize(groups, request.user, serializer()))
group = get_by_short_id(organization.id, request.GET.get("shortIdLookup"), query)
if group is not None:
# check all projects user has access to
if request.access.has_project_access(group.project):
response = Response(serialize([group], request.user, serializer()))
response["X-Sentry-Direct-Hit"] = "1"
return response
# If group ids specified, just ignore any query components
try:
group_ids = set(map(int, request.GET.getlist("group")))
except ValueError:
return Response({"detail": "Group ids must be integers"}, status=400)
if group_ids:
groups = list(Group.objects.filter(id__in=group_ids, project_id__in=project_ids))
if any(g for g in groups if not request.access.has_project_access(g.project)):
raise PermissionDenied
return Response(serialize(groups, request.user, serializer()))
try:
cursor_result, query_kwargs = self._search(
request,
organization,
projects,
environments,
{"count_hits": True, "date_to": end, "date_from": start},
)
except (ValidationError, discover.InvalidSearchQuery) as exc:
return Response({"detail": six.text_type(exc)}, status=400)
results = list(cursor_result)
context = serialize(
results,
request.user,
serializer(
start=start,
end=end,
search_filters=query_kwargs["search_filters"]
if "search_filters" in query_kwargs
else None,
),
)
# HACK: remove auto resolved entries
# TODO: We should try to integrate this into the search backend, since
# this can cause us to arbitrarily return fewer results than requested.
status = [
search_filter
for search_filter in query_kwargs.get("search_filters", [])
if search_filter.key.name == "status"
]
if status and status[0].value.raw_value == GroupStatus.UNRESOLVED:
context = [r for r in context if "status" not in r or r["status"] == "unresolved"]
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
# TODO(jess): add metrics that are similar to project endpoint here
return response
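    # Illustrative request (a sketch; the URL prefix depends on deployment):
    #     GET /api/0/organizations/{organization_slug}/issues/
    #         ?query=is%3Aunresolved&statsPeriod=14d&groupStatsPeriod=24h
    # returns the organization's unresolved issues from the last 14 days with
    # 24h timeline stats, serialized by StreamGroupSerializerSnuba.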
@rate_limit_endpoint(limit=10, window=1)
def put(self, request, organization):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
        The following attributes can be modified and are supplied as a
        JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``"resolvedInNextRelease"``,
``"unresolved"``, and ``"ignored"``. Status
updates that include release data are only allowed
for groups within a single project.
:param map statusDetails: additional details about the resolution.
Valid values are ``"inRelease"``, ``"inNextRelease"``,
``"inCommit"``, ``"ignoreDuration"``, ``"ignoreCount"``,
``"ignoreWindow"``, ``"ignoreUserCount"``, and
``"ignoreUserWindow"``. Status detail
updates that include release data are only allowed
for groups within a single project.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
        :param boolean merge: allows merging or unmerging different issues.
:param string assignedTo: the user or team that should be assigned to
these issues. Can be of the form ``"<user_id>"``,
``"user:<user_id>"``, ``"<username>"``,
``"<user_primary_email>"``, or ``"team:<team_id>"``.
Bulk assigning issues is limited to groups
within a single project.
:param boolean hasSeen: if this API call is invoked with a user
context, this allows changing the flag
that indicates whether the user has seen
the event.
:param boolean isBookmarked: if this API call is invoked with a
user context, this allows changing
the bookmark flag.
:auth: required
"""
projects = self.get_projects(request, organization)
has_inbox = features.has("organizations:inbox", organization, actor=request.user)
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
search_fn = functools.partial(
self._search,
request,
organization,
projects,
self.get_environments(request, organization),
)
return update_groups(request, projects, organization.id, search_fn, has_inbox)
@rate_limit_endpoint(limit=10, window=1)
def delete(self, request, organization):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
remove is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:auth: required
"""
projects = self.get_projects(request, organization)
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
search_fn = functools.partial(
self._search,
request,
organization,
projects,
self.get_environments(request, organization),
)
return delete_groups(request, projects, organization.id, search_fn)
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/api/endpoints/organization_group_index.py",
"copies": "1",
"size": "15942",
"license": "bsd-3-clause",
"hash": 61428417555670460,
"line_mean": 43.4066852368,
"line_max": 133,
"alpha_frac": 0.5769037762,
"autogenerated": false,
"ratio": 4.86333129957291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009543976542072439,
"num_lines": 359
} |
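The first copy above wraps its `put` and `delete` handlers in `@rate_limit_endpoint(limit=10, window=1)`, but the decorator's definition is not part of this excerpt. The snippet below is a minimal, hypothetical sketch of a decorator with the same signature, using a simple in-process sliding window; it is illustrative only and is not the Sentry implementation.

import time
from collections import deque
from functools import wraps

from rest_framework.response import Response


def rate_limit_endpoint(limit=10, window=1):
    """Hypothetical sketch: allow at most `limit` calls per `window` seconds."""

    def decorator(view_func):
        calls = deque()

        @wraps(view_func)
        def wrapped(self, request, *args, **kwargs):
            now = time.time()
            # Drop timestamps that have fallen out of the sliding window.
            while calls and now - calls[0] > window:
                calls.popleft()
            if len(calls) >= limit:
                return Response({"detail": "Rate limit exceeded"}, status=429)
            calls.append(now)
            return view_func(self, request, *args, **kwargs)

        return wrapped

    return decorator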
from __future__ import absolute_import, division, print_function
import functools
import six
from django.conf import settings
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from sentry import features
from sentry.api.bases import OrganizationEventsEndpointBase, OrganizationEventPermission
from sentry.api.helpers.group_index import (
build_query_params_from_request,
delete_groups,
get_by_short_id,
update_groups,
ValidationError,
)
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializerSnuba
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.models import Group, GroupStatus
from sentry.search.snuba.backend import SnubaSearchBackend
from sentry.utils.validators import normalize_event_id
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
search = SnubaSearchBackend(**settings.SENTRY_SEARCH_OPTIONS)
class OrganizationGroupIndexEndpoint(OrganizationEventsEndpointBase):
permission_classes = (OrganizationEventPermission,)
def _search(self, request, organization, projects, environments, extra_query_kwargs=None):
query_kwargs = build_query_params_from_request(
request, organization, projects, environments
)
if extra_query_kwargs is not None:
assert "environment" not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
query_kwargs["environments"] = environments if environments else None
result = search.query(**query_kwargs)
return result, query_kwargs
def get(self, request, organization):
"""
List an Organization's Issues
`````````````````````````````
Return a list of issues (groups) bound to an organization. All parameters are
supplied as query string parameters.
A default query of ``is:unresolved`` is applied. To return results
with other statuses, send a new query value (i.e. ``?query=`` for all
results).
The ``groupStatsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
The ``statsPeriod`` parameter can be used to select a date window starting
from now. Ex. ``14d``.
The ``start`` and ``end`` parameters can be used to select an absolute
date period to fetch issues from.
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string groupStatsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string start: Beginning date. You must also provide ``end``.
:qparam string end: End date. You must also provide ``start``.
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this function as well. This
can cause the function to return an issue
from a different project, which is why
this is opt-in.
Set to `1` to enable.
:qparam querystring query: an optional Sentry structured search
query. If not provided, an implied
``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:auth: required
"""
stats_period = request.GET.get("groupStatsPeriod")
if stats_period not in (None, "", "24h", "14d"):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = "24h"
elif stats_period == "":
# disable stats
stats_period = None
environments = self.get_environments(request, organization)
serializer = functools.partial(
StreamGroupSerializerSnuba,
environment_ids=[env.id for env in environments],
stats_period=stats_period,
)
projects = self.get_projects(request, organization)
project_ids = [p.id for p in projects]
if not projects:
return Response([])
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
# we ignore date range for both short id and event ids
query = request.GET.get("query", "").strip()
if query:
# check to see if we've got an event ID
event_id = normalize_event_id(query)
if event_id:
# For a direct hit lookup we want to use any passed project ids
# (we've already checked permissions on these) plus any other
# projects that the user is a member of. This gives us a better
# chance of returning the correct result, even if the wrong
# project is selected.
direct_hit_projects = set(project_ids) | set(
[project.id for project in request.access.projects]
)
groups = list(Group.objects.filter_by_event_id(direct_hit_projects, event_id))
if len(groups) == 1:
response = Response(
serialize(groups, request.user, serializer(matching_event_id=event_id))
)
response["X-Sentry-Direct-Hit"] = "1"
return response
if groups:
return Response(serialize(groups, request.user, serializer()))
group = get_by_short_id(organization.id, request.GET.get("shortIdLookup"), query)
if group is not None:
# check all projects user has access to
if request.access.has_project_access(group.project):
response = Response(serialize([group], request.user, serializer()))
response["X-Sentry-Direct-Hit"] = "1"
return response
# If group ids specified, just ignore any query components
try:
group_ids = set(map(int, request.GET.getlist("group")))
except ValueError:
return Response({"detail": "Group ids must be integers"}, status=400)
if group_ids:
groups = list(Group.objects.filter(id__in=group_ids, project_id__in=project_ids))
if any(g for g in groups if not request.access.has_project_access(g.project)):
raise PermissionDenied
return Response(serialize(groups, request.user, serializer()))
try:
start, end = get_date_range_from_params(request.GET)
except InvalidParams as exc:
return Response({"detail": exc.message}, status=400)
try:
cursor_result, query_kwargs = self._search(
request,
organization,
projects,
environments,
{"count_hits": True, "date_to": end, "date_from": start},
)
except ValidationError as exc:
return Response({"detail": six.text_type(exc)}, status=400)
results = list(cursor_result)
context = serialize(results, request.user, serializer())
# HACK: remove auto resolved entries
# TODO: We should try to integrate this into the search backend, since
# this can cause us to arbitrarily return fewer results than requested.
status = [
search_filter
for search_filter in query_kwargs.get("search_filters", [])
if search_filter.key.name == "status"
]
if status and status[0].value.raw_value == GroupStatus.UNRESOLVED:
context = [r for r in context if r["status"] == "unresolved"]
response = Response(context)
self.add_cursor_headers(request, response, cursor_result)
# TODO(jess): add metrics that are similar to project endpoint here
return response
def put(self, request, organization):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
The following attributes can be modified and are supplied as a
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"ignored"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``"resolvedInNextRelease"``,
``"unresolved"``, and ``"ignored"``. Status
updates that include release data are only allowed
for groups within a single project.
:param map statusDetails: additional details about the resolution.
Valid values are ``"inRelease"``, ``"inNextRelease"``,
``"inCommit"``, ``"ignoreDuration"``, ``"ignoreCount"``,
``"ignoreWindow"``, ``"ignoreUserCount"``, and
``"ignoreUserWindow"``. Status detail
updates that include release data are only allowed
for groups within a single project.
:param int ignoreDuration: the number of minutes to ignore this issue.
:param boolean isPublic: sets the issue to public or private.
:param boolean merge: allows merging or unmerging of different issues.
:param string assignedTo: the actor id (or username) of the user or team that should be
assigned to this issue. Bulk assigning issues
is limited to groups within a single project.
:param boolean hasSeen: if this API call is invoked with a user
context, this allows changing the flag
that indicates whether the user has seen
the event.
:param boolean isBookmarked: if this API call is invoked with a
user context, this allows changing
the bookmark flag.
:auth: required
"""
projects = self.get_projects(request, organization)
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
search_fn = functools.partial(
self._search,
request,
organization,
projects,
self.get_environments(request, organization),
)
return update_groups(request, projects, organization.id, search_fn)
def delete(self, request, organization):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
remove is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:auth: required
"""
projects = self.get_projects(request, organization)
if len(projects) > 1 and not features.has(
"organizations:global-views", organization, actor=request.user
):
return Response(
{"detail": "You do not have the multi project stream feature enabled"}, status=400
)
search_fn = functools.partial(
self._search,
request,
organization,
projects,
self.get_environments(request, organization),
)
return delete_groups(request, projects, organization.id, search_fn)
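# The sketch below is a hypothetical client-side illustration of the bulk
# mutate endpoint documented in `put()` above; the base URL, organization
# slug, token, and route path are placeholders, not values taken from this
# file.
import requests

SENTRY_BASE = "https://sentry.example.com/api/0"
ORG_SLUG = "my-org"
TOKEN = "<auth-token>"


def resolve_issues(issue_ids):
    # Repeat the `id` query parameter once per issue, as the docstring requires.
    response = requests.put(
        "{}/organizations/{}/issues/".format(SENTRY_BASE, ORG_SLUG),
        params=[("id", issue_id) for issue_id in issue_ids],
        json={"status": "resolved"},
        headers={"Authorization": "Bearer {}".format(TOKEN)},
    )
    response.raise_for_status()
    return response.json()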
| {
"repo_name": "mvaled/sentry",
"path": "src/sentry/api/endpoints/organization_group_index.py",
"copies": "1",
"size": "14036",
"license": "bsd-3-clause",
"hash": -2762507686516122000,
"line_mean": 43.417721519,
"line_max": 98,
"alpha_frac": 0.5777286976,
"autogenerated": false,
"ratio": 4.977304964539007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6055033662139007,
"avg_score": null,
"num_lines": null
} |
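Both copies paginate their GET results through `self.add_cursor_headers(...)`. The sketch below is one hypothetical way a client could walk those pages; the base URL, organization slug, and token are placeholders, and the assumption that the cursor is exposed through a standard `Link` header (with a `results` parameter) follows the endpoint's use of cursor headers rather than anything shown explicitly in this excerpt.

import requests

SENTRY_BASE = "https://sentry.example.com/api/0"
ORG_SLUG = "my-org"
TOKEN = "<auth-token>"


def iter_unresolved_issues():
    url = "{}/organizations/{}/issues/".format(SENTRY_BASE, ORG_SLUG)
    params = {"query": "is:unresolved", "statsPeriod": "24h"}
    headers = {"Authorization": "Bearer {}".format(TOKEN)}
    while url:
        response = requests.get(url, params=params, headers=headers)
        response.raise_for_status()
        for issue in response.json():
            yield issue
        # requests parses RFC 5988 Link headers into response.links; stop
        # when no further page is advertised.
        next_link = response.links.get("next", {})
        if next_link.get("results") == "true":
            url = next_link["url"]
            params = None  # the cursor is already encoded in the next URL
        else:
            url = None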