def alias(attr_name):
    """Make a fixture attribute an alias of another fixture's attribute by
    default.

    Parameters
    ----------
    attr_name : str
        The name of the attribute to alias.

    Returns
    -------
    p : classproperty
        A class property that does the property aliasing.

    Examples
    --------
    >>> class C(object):
    ...     attr = 1
    ...
    >>> class D(C):
    ...     attr_alias = alias('attr')
    ...
    >>> D.attr
    1
    >>> D.attr_alias
    1
    >>> class E(D):
    ...     attr_alias = 2
    ...
    >>> E.attr
    1
    >>> E.attr_alias
    2
    """
    return classproperty(flip(getattr, attr_name))
def fast_get_loc_ffilled(dts, dt):
    """
    Equivalent to dts.get_loc(dt, method='ffill'), but with reasonable
    microperformance.
    """
    ix = dts.searchsorted(dt, side='right') - 1
    if ix < 0:
        raise KeyError(dt)
    return ix
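A quick usage sketch; the index values below are illustrative, and any sorted
DatetimeIndex works:

import pandas as pd

dts = pd.date_range('2014-01-01', periods=5)
# An exact hit returns its own position...
print(fast_get_loc_ffilled(dts, pd.Timestamp('2014-01-03')))        # 2
# ...and a timestamp between entries forward-fills to the prior one.
print(fast_get_loc_ffilled(dts, pd.Timestamp('2014-01-03 12:00')))  # 2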
def keywords(func):
    """Get the argument names of a function

    >>> def f(x, y=2):
    ...     pass
    >>> keywords(f)
    ['x', 'y']

    Notes
    -----
    Taken from odo.utils
    """
    if isinstance(func, type):
        return keywords(func.__init__)
    elif isinstance(func, partial):
        return keywords(func.func)
    return getargspec(func).args
def filter_kwargs(f, kwargs):
    """Return a dict of valid kwargs for `f` from a subset of `kwargs`

    Examples
    --------
    >>> def f(a, b=1, c=2):
    ...     return a + b + c
    ...
    >>> raw_kwargs = dict(a=1, b=3, d=4)
    >>> f(**raw_kwargs)
    Traceback (most recent call last):
        ...
    TypeError: f() got an unexpected keyword argument 'd'
    >>> kwargs = filter_kwargs(f, raw_kwargs)
    >>> f(**kwargs)
    6

    Notes
    -----
    Taken from odo.utils
    """
    return keyfilter(op.contains(keywords(f)), kwargs)
def _fmt_path(path):
    """Format the path for final display.

    Parameters
    ----------
    path : iterable of str
        The path to the values that are not equal.

    Returns
    -------
    fmtd : str
        The formatted path to put into the error message.
    """
    if not path:
        return ''
    return 'path: _' + ''.join(path)
def _fmt_msg(msg):
    """Format the message for final display.

    Parameters
    ----------
    msg : str
        The message to show to the user to provide additional context.

    Returns
    -------
    fmtd : str
        The formatted message to put into the error message.
    """
    if not msg:
        return ''
    return msg + '\n'
def assert_is_subclass(subcls, cls, msg=''):
    """Assert that ``subcls`` is a subclass of ``cls``.

    Parameters
    ----------
    subcls : type
        The type to check.
    cls : type
        The type to check ``subcls`` against.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    assert issubclass(subcls, cls), (
        '%s is not a subclass of %s\n%s' % (
            _safe_cls_name(subcls),
            _safe_cls_name(cls),
            msg,
        )
    )
def assert_is_not_subclass(not_subcls, cls, msg=''):
    """Assert that ``not_subcls`` is not a subclass of ``cls``.

    Parameters
    ----------
    not_subcls : type
        The type to check.
    cls : type
        The type to check ``not_subcls`` against.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    assert not issubclass(not_subcls, cls), (
        '%s is a subclass of %s\n%s' % (
            _safe_cls_name(not_subcls),
            _safe_cls_name(cls),
            msg,
        )
    )
def assert_regex(result, expected, msg=''):
    """Assert that ``expected`` matches the result.

    Parameters
    ----------
    result : str
        The string to search.
    expected : str or compiled regex
        The pattern to search for in ``result``.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    assert re.search(expected, result), (
        '%s%r not found in %r' % (_fmt_msg(msg), expected, result)
    )
def assert_raises_regex(exc, pattern, msg=''):
    """Assert that some exception is raised in a context and that the message
    matches some pattern.

    Parameters
    ----------
    exc : type or tuple[type]
        The exception type or types to expect.
    pattern : str or compiled regex
        The pattern to search for in the str of the raised exception.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    def check_exception(e):
        assert re.search(pattern, str(e)), (
            '%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e))
        )

    return _assert_raises_helper(
        do_check=check_exception,
        exc_type=exc,
        msg=msg,
    )
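Assuming ``_assert_raises_helper`` (not shown in this section) returns a
context manager, as the call pattern suggests, usage looks like:

with assert_raises_regex(ValueError, r'expected \d+ shares'):
    raise ValueError('expected 20 shares, got 15')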
def assert_raises_str(exc, expected_str, msg=''):
    """Assert that some exception is raised in a context and that the message
    exactly matches some string.

    Parameters
    ----------
    exc : type or tuple[type]
        The exception type or types to expect.
    expected_str : str
        The expected result of ``str(exception)``.
    msg : str, optional
        An extra assertion message to print if this fails.
    """
    def check_exception(e):
        result = str(e)
        assert_messages_equal(result, expected_str, msg=msg)

    return _assert_raises_helper(
        check_exception,
        exc_type=exc,
        msg=msg,
    )
def make_assert_equal_assertion_error(assertion_message, path, msg):
    """Create an assertion error formatted for use in ``assert_equal``.

    Parameters
    ----------
    assertion_message : str
        The concrete reason for the failure.
    path : tuple[str]
        The path leading up to the failure.
    msg : str
        The user supplied message.

    Returns
    -------
    exception_instance : AssertionError
        The new exception instance.

    Notes
    -----
    This doesn't raise the exception, it only returns it.
    """
    return AssertionError(
        '%s%s\n%s' % (
            _fmt_msg(msg),
            assertion_message,
            _fmt_path(path),
        ),
    )
def assert_equal(result, expected, path=(), msg='', **kwargs):
    """Assert that two objects are equal using the ``==`` operator.

    Parameters
    ----------
    result : object
        The result that came from the function under test.
    expected : object
        The expected result.

    Raises
    ------
    AssertionError
        Raised when ``result`` is not equal to ``expected``.
    """
    if result != expected:
        raise make_assert_equal_assertion_error(
            '%s != %s' % (result, expected),
            path,
            msg,
        )
def _check_sets(result, expected, msg, path, type_):
    """Compare two sets. This is used to check dictionary keys and sets.

    Parameters
    ----------
    result : set
    expected : set
    msg : str
    path : tuple
    type_ : str
        The type of an element. For dict we use ``'key'`` and for set we use
        ``'element'``.
    """
    if result != expected:
        if result > expected:
            diff = result - expected
            msg = 'extra %s in result: %r' % (s(type_, diff), diff)
        elif result < expected:
            diff = expected - result
            msg = 'result is missing %s: %r' % (s(type_, diff), diff)
        else:
            in_result = result - expected
            in_expected = expected - result
            msg = '%s only in result: %s\n%s only in expected: %s' % (
                s(type_, in_result),
                in_result,
                s(type_, in_expected),
                in_expected,
            )
        raise AssertionError(
            '%ss do not match\n%s%s' % (
                type_,
                _fmt_msg(msg),
                _fmt_path(path),
            ),
        )
def _register_assert_equal_wrapper(type_, assert_eq):
    """Register a new check for an ndframe object.

    Parameters
    ----------
    type_ : type
        The class to register an ``assert_equal`` dispatch for.
    assert_eq : callable[type_, type_]
        The function which checks that the two ndframes are equal.

    Returns
    -------
    assert_ndframe_equal : callable[type_, type_]
        The wrapped function registered with ``assert_equal``.
    """
    @assert_equal.register(type_, type_)
    def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
        try:
            assert_eq(result, expected, **filter_kwargs(assert_eq, kwargs))
        except AssertionError as e:
            raise AssertionError(
                _fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
            )

    return assert_ndframe_equal
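A sketch of how this wrapper is typically applied, assuming the pandas
testing helpers are available; the registrations below are illustrative,
not part of this section:

import pandas as pd
import pandas.util.testing as pdt  # pandas.testing on newer versions

assert_frame_equal = _register_assert_equal_wrapper(
    pd.DataFrame, pdt.assert_frame_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
    pd.Series, pdt.assert_series_equal,
)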
def assert_timestamp_and_datetime_equal(result,
                                        expected,
                                        path=(),
                                        msg='',
                                        allow_datetime_coercions=False,
                                        compare_nat_equal=True,
                                        **kwargs):
    """
    Branch for comparing python datetime (which includes pandas Timestamp)
    and np.datetime64 as equal.

    Raises unless ``allow_datetime_coercions`` is passed as True.
    """
    assert allow_datetime_coercions or type(result) == type(expected), (
        "%sdatetime types (%s, %s) don't match and "
        "allow_datetime_coercions was not set.\n%s" % (
            _fmt_msg(msg),
            type(result),
            type(expected),
            _fmt_path(path),
        )
    )

    if isinstance(result, pd.Timestamp) and isinstance(expected, pd.Timestamp):
        assert_equal(
            result.tz,
            expected.tz,
            path=path + ('.tz',),
            msg=msg,
            **kwargs
        )

    result = pd.Timestamp(result)
    expected = pd.Timestamp(expected)
    if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
        return

    assert_equal.dispatch(object, object)(
        result,
        expected,
        path=path,
        msg=msg,
        **kwargs
    )
def assert_messages_equal(result, expected, msg=''):
    """Assertion helper for comparing very long strings (e.g. error messages).
    """
    # The arg here is "keepends" which keeps trailing newlines (which
    # matters for checking trailing whitespace). You can't pass keepends by
    # name :(.
    left_lines = result.splitlines(True)
    right_lines = expected.splitlines(True)
    iter_lines = enumerate(zip_longest(left_lines, right_lines))
    for line, (ll, rl) in iter_lines:
        if ll != rl:
            col = index_of_first_difference(ll, rl)
            raise AssertionError(
                "{msg}Messages differ on line {line}, col {col}:"
                "\n{ll!r}\n!=\n{rl!r}".format(
                    msg=_fmt_msg(msg), line=line, col=col, ll=ll, rl=rl
                )
            )
def index_of_first_difference(left, right):
    """Get the index of the first difference between two strings."""
    difflocs = (
        i for (i, (lc, rc)) in enumerate(zip_longest(left, right))
        if lc != rc
    )
    try:
        return next(difflocs)
    except StopIteration:
        raise ValueError("Left was equal to right!")
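For example (the second call shows that ``zip_longest`` pads the shorter
string, so a pure prefix still reports a difference at the first padded
position):

print(index_of_first_difference('abcd', 'abXd'))  # 2
print(index_of_first_difference('ab', 'abc'))     # 2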
def require_not_initialized(exception):
    """
    Decorator for API methods that should only be called during or before
    TradingAlgorithm.initialize.  `exception` will be raised if the method is
    called after initialize.

    Examples
    --------
    @require_not_initialized(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that should only be allowed during initialize.
    """
    def decorator(method):
        @wraps(method)
        def wrapped_method(self, *args, **kwargs):
            if self.initialized:
                raise exception
            return method(self, *args, **kwargs)
        return wrapped_method
    return decorator
def require_initialized(exception):
    """
    Decorator for API methods that should only be called after
    TradingAlgorithm.initialize.  `exception` will be raised if the method is
    called before initialize has completed.

    Examples
    --------
    @require_initialized(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that should only be allowed after initialize.
    """
    def decorator(method):
        @wraps(method)
        def wrapped_method(self, *args, **kwargs):
            if not self.initialized:
                raise exception
            return method(self, *args, **kwargs)
        return wrapped_method
    return decorator
def disallowed_in_before_trading_start(exception):
    """
    Decorator for API methods that cannot be called from within
    TradingAlgorithm.before_trading_start.  `exception` will be raised if the
    method is called inside `before_trading_start`.

    Examples
    --------
    @disallowed_in_before_trading_start(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that is not allowed inside before_trading_start.
    """
    def decorator(method):
        @wraps(method)
        def wrapped_method(self, *args, **kwargs):
            if self._in_before_trading_start:
                raise exception
            return method(self, *args, **kwargs)
        return wrapped_method
    return decorator
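A minimal, self-contained sketch of these guard decorators; the stub class
and its flags are hypothetical stand-ins for TradingAlgorithm's state:

class AlgoStub(object):
    initialized = False
    _in_before_trading_start = False

    @require_initialized(RuntimeError('initialize() has not run yet'))
    def order_something(self):
        return 'ordered'

algo = AlgoStub()
try:
    algo.order_something()
except RuntimeError as e:
    print(e)                    # initialize() has not run yet
algo.initialized = True
print(algo.order_something())   # ordered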
def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
    """
    Checks for the presence of an extra to the argument list. Raises
    exceptions if this is unexpected or if it is missing and expected.
    """
    if present:
        if not expected:
            raise exc_unexpected(*exc_args)
    elif expected and expected is not Argument.ignore:
        raise exc_missing(*exc_args)
def verify_callable_argspec(callable_,
                            expected_args=Argument.ignore,
                            expect_starargs=Argument.ignore,
                            expect_kwargs=Argument.ignore):
    """
    Checks the callable_ to make sure that it satisfies the given
    expectations.

    expected_args should be an iterable of Arguments in the order you expect
    to receive them.

    expect_starargs means that the function should or should not take a
    *args param. expect_kwargs says the callable should or should not take a
    **kwargs param. If expected_args, expect_starargs, or expect_kwargs is
    Argument.ignore, then the checks related to that argument will not occur.

    Example usage:

    verify_callable_argspec(
        f,
        [Argument('a'), Argument('b', 1)],
        expect_starargs=True,
        expect_kwargs=Argument.ignore
    )
    """
    if not callable(callable_):
        raise NotCallable(callable_)

    expected_arg_list = list(
        expected_args if expected_args is not Argument.ignore else []
    )

    args, starargs, kwargs = Argument.parse_argspec(callable_)

    exc_args = callable_, args, starargs, kwargs

    # Check the *args.
    _expect_extra(
        expect_starargs,
        starargs,
        UnexpectedStarargs,
        NoStarargs,
        exc_args,
    )
    # Check the **kwargs.
    _expect_extra(
        expect_kwargs,
        kwargs,
        UnexpectedKwargs,
        NoKwargs,
        exc_args,
    )

    if expected_args is Argument.ignore:
        # Ignore the argument list checks.
        return

    if len(args) < len(expected_arg_list):
        # One or more expected arguments were not present.
        raise NotEnoughArguments(
            callable_,
            args,
            starargs,
            kwargs,
            [arg for arg in expected_arg_list if arg not in args],
        )
    elif len(args) > len(expected_arg_list):
        raise TooManyArguments(callable_, args, starargs, kwargs)

    # Empty argument that will not match with any actual arguments.
    missing_arg = Argument(object(), object())

    for expected, provided in zip_longest(expected_arg_list,
                                          args,
                                          fillvalue=missing_arg):
        if not expected.matches(provided):
            raise MismatchedArguments(callable_, args, starargs, kwargs)
def maybe_show_progress(it, show_progress, **kwargs):
    """Optionally show a progress bar for the given iterator.

    Parameters
    ----------
    it : iterable
        The underlying iterator.
    show_progress : bool
        Should progress be shown.
    **kwargs
        Forwarded to the click progress bar.

    Returns
    -------
    itercontext : context manager
        A context manager whose enter is the actual iterator to use.

    Examples
    --------
    .. code-block:: python

       with maybe_show_progress([1, 2, 3], True) as ns:
           for n in ns:
               ...
    """
    if show_progress:
        return click.progressbar(it, **kwargs)

    # context manager that just returns `it` when we enter it
    return CallbackManager(lambda it=it: it)
def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
    """Compute the start and end dates to run a pipeline for.

    Parameters
    ----------
    sessions : DatetimeIndex
        The available dates.
    start_date : pd.Timestamp
        The first date in the pipeline.
    end_date : pd.Timestamp
        The last date in the pipeline.
    chunksize : int or None
        The size of the chunks to run. Setting this to None returns one chunk.

    Returns
    -------
    ranges : iterable[(np.datetime64, np.datetime64)]
        A sequence of start and end dates to run the pipeline for.
    """
    if start_date not in sessions:
        raise KeyError("Start date %s is not found in calendar." %
                       (start_date.strftime("%Y-%m-%d"),))
    if end_date not in sessions:
        raise KeyError("End date %s is not found in calendar." %
                       (end_date.strftime("%Y-%m-%d"),))
    if end_date < start_date:
        raise ValueError("End date %s cannot precede start date %s." %
                         (end_date.strftime("%Y-%m-%d"),
                          start_date.strftime("%Y-%m-%d")))

    if chunksize is None:
        return [(start_date, end_date)]

    start_ix, end_ix = sessions.slice_locs(start_date, end_date)
    return (
        (r[0], r[-1]) for r in partition_all(
            chunksize, sessions[start_ix:end_ix]
        )
    )
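A usage sketch with a plain business-day index standing in for a trading
calendar:

import pandas as pd

sessions = pd.date_range('2014-01-06', '2014-01-17', freq='B')
chunks = compute_date_range_chunks(sessions, sessions[0], sessions[-1],
                                   chunksize=5)
for start, end in chunks:
    print(start.date(), '->', end.date())
# 2014-01-06 -> 2014-01-10
# 2014-01-13 -> 2014-01-17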
def deprecated(msg=None, stacklevel=2):
    """
    Used to mark a function as deprecated.

    Parameters
    ----------
    msg : str
        The message to display in the deprecation warning.
    stacklevel : int
        How far up the stack the warning needs to go, before showing the
        relevant calling lines.

    Examples
    --------
    @deprecated(msg='function_a is deprecated! Use function_b instead.')
    def function_a(*args, **kwargs):
    """
    def deprecated_dec(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                msg or "Function %s is deprecated." % fn.__name__,
                category=DeprecationWarning,
                stacklevel=stacklevel
            )
            return fn(*args, **kwargs)
        return wrapper
    return deprecated_dec
def enum(option, *options):
    """
    Construct a new enum object.

    Parameters
    ----------
    *options : iterable of str
        The names of the fields for the enum.

    Returns
    -------
    enum
        A new enum collection.

    Examples
    --------
    >>> e = enum('a', 'b', 'c')
    >>> e
    <enum: ('a', 'b', 'c')>
    >>> e.a
    0
    >>> e.b
    1
    >>> e.a in e
    True
    >>> tuple(e)
    (0, 1, 2)

    Notes
    -----
    Identity checking is not guaranteed to work with enum members, instead
    equality checks should be used. From CPython's documentation:

    "The current implementation keeps an array of integer objects for all
    integers between -5 and 256, when you create an int in that range you
    actually just get back a reference to the existing object. So it should
    be possible to change the value of 1. I suspect the behaviour of Python
    in this case is undefined. :-)"
    """
    options = (option,) + options
    rangeob = range(len(options))

    try:
        inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
    except IndexError:
        raise OverflowError(
            'Cannot store enums with more than sys.maxsize elements, got %d' %
            len(options),
        )

    class _enum(Structure):
        _fields_ = [(o, inttype) for o in options]

        def __iter__(self):
            return iter(rangeob)

        def __contains__(self, value):
            return 0 <= value < len(options)

        def __repr__(self):
            return '<enum: %s>' % (
                ('%d fields' % len(options))
                if len(options) > 10 else
                repr(options)
            )

    return _enum(*rangeob)
def naive_to_utc(ts):
    """
    Converts a UTC tz-naive timestamp to a tz-aware timestamp.
    """
    # Drop the nanoseconds field. warn=False suppresses the warning
    # that we are losing the nanoseconds; however, this is intended.
    return pd.Timestamp(ts.to_pydatetime(warn=False), tz='UTC')
def ensure_utc(time, tz='UTC'):
    """
    Normalize a time. If the time is tz-naive, assume it is UTC.
    """
    if not time.tzinfo:
        time = time.replace(tzinfo=pytz.timezone(tz))
    return time.replace(tzinfo=pytz.utc)
def _build_offset(offset, kwargs, default):
    """
    Builds the offset argument for event rules.
    """
    # Filter down to just kwargs that were actually passed.
    kwargs = {k: v for k, v in six.iteritems(kwargs) if v is not None}
    if offset is None:
        if not kwargs:
            return default  # use the default.
        else:
            return _td_check(datetime.timedelta(**kwargs))
    elif kwargs:
        raise ValueError('Cannot pass kwargs and an offset')
    elif isinstance(offset, datetime.timedelta):
        return _td_check(offset)
    else:
        raise TypeError("Must pass 'hours' and/or 'minutes' as keywords")
def _build_date(date, kwargs):
    """
    Builds the date argument for event rules.
    """
    if date is None:
        if not kwargs:
            raise ValueError('Must pass a date or kwargs')
        else:
            return datetime.date(**kwargs)
    elif kwargs:
        raise ValueError('Cannot pass kwargs and a date')
    else:
        return date
def _build_time(time, kwargs):
    """
    Builds the time argument for event rules.
    """
    tz = kwargs.pop('tz', 'UTC')
    if time:
        if kwargs:
            raise ValueError('Cannot pass kwargs and a time')
        else:
            return ensure_utc(time, tz)
    elif not kwargs:
        raise ValueError('Must pass a time or kwargs')
    else:
        return datetime.time(**kwargs)
def lossless_float_to_int(funcname, func, argname, arg):
    """
    A preprocessor that coerces integral floats to ints.

    Receipt of non-integral floats raises a TypeError.
    """
    if not isinstance(arg, float):
        return arg

    arg_as_int = int(arg)
    if arg == arg_as_int:
        warnings.warn(
            "{f} expected an int for argument {name!r}, but got float {arg}."
            " Coercing to int.".format(
                f=funcname,
                name=argname,
                arg=arg,
            ),
        )
        return arg_as_int

    raise TypeError(arg)
def make_eventrule(date_rule, time_rule, cal, half_days=True):
    """
    Constructs an event rule from the factory api.
    """
    _check_if_not_called(date_rule)
    _check_if_not_called(time_rule)

    if half_days:
        inner_rule = date_rule & time_rule
    else:
        inner_rule = date_rule & time_rule & NotHalfDay()

    opd = OncePerDay(rule=inner_rule)
    # This is where a scheduled function's rule is associated with a calendar.
    opd.cal = cal
    return opd
def create_daily_trade_source(sids, sim_params, asset_finder,
                              trading_calendar):
    """
    Creates trade_count trades for each sid in the sids list.

    The first trade will be on sim_params.start_session, and daily
    thereafter for each sid. Thus, two sids should result in two trades per
    day.
    """
    return create_trade_source(
        sids,
        timedelta(days=1),
        sim_params,
        asset_finder,
        trading_calendar=trading_calendar,
    )
def bases_mro(bases):
    """
    Yield classes in the order that methods should be looked up from the base
    classes of an object.
    """
    for base in bases:
        for class_ in base.__mro__:
            yield class_
def is_final(name, mro):
    """
    Checks if `name` is a `final` object in the given `mro`.

    We need to check the mro because we need to directly go into the __dict__
    of the classes. Because `final` objects are descriptors, we need to grab
    them _BEFORE_ the `__call__` is invoked.
    """
    return any(
        isinstance(getattr(c, '__dict__', {}).get(name), final)
        for c in bases_mro(mro)
    )
def s(word, seq, suffix='s'):
    """Adds a suffix to ``word`` if some sequence has anything other than
    exactly one element.

    Parameters
    ----------
    word : str
        The string to add the suffix to.
    seq : sequence
        The sequence to check the length of.
    suffix : str, optional
        The suffix to add to ``word``

    Returns
    -------
    maybe_plural : str
        ``word`` with ``suffix`` added if ``len(seq) != 1``.
    """
    if len(seq) == 1:
        return word

    return word + suffix
def plural(singular, plural, seq):
    """Selects a singular or plural word based on the length of a sequence.

    Parameters
    ----------
    singular : str
        The string to use when ``len(seq) == 1``.
    plural : str
        The string to use when ``len(seq) != 1``.
    seq : sequence
        The sequence to check the length of.

    Returns
    -------
    maybe_plural : str
        Either ``singular`` or ``plural``.
    """
    if len(seq) == 1:
        return singular

    return plural
def bulleted_list(items, indent=0, bullet_type='-'):
    """Format a bulleted list of values.

    Parameters
    ----------
    items : sequence
        The items to make a list.
    indent : int, optional
        The number of spaces to add before each bullet.
    bullet_type : str, optional
        The bullet type to use.

    Returns
    -------
    formatted_list : str
        The formatted list as a single string.
    """
    format_string = ' ' * indent + bullet_type + ' {}'
    return "\n".join(map(format_string.format, items))
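For example:

print(bulleted_list(['apples', 'bananas'], indent=2, bullet_type='*'))
#   * apples
#   * bananas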
def apply(f, *args, **kwargs):
    """Apply a function to arguments.

    Parameters
    ----------
    f : callable
        The function to call.
    *args, **kwargs
        Arguments to feed to the callable.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``

    Examples
    --------
    >>> from toolz.curried.operator import add, sub
    >>> fs = add(1), sub(1)
    >>> tuple(map(apply, fs, (1, 2)))
    (2, -1)

    Class decorator

    >>> instance = apply
    >>> @instance
    ... class obj:
    ...     def f(self):
    ...         return 'f'
    ...
    >>> obj.f()
    'f'
    >>> issubclass(obj, object)
    Traceback (most recent call last):
        ...
    TypeError: issubclass() arg 1 must be a class
    >>> isinstance(obj, type)
    False

    See Also
    --------
    unpack_apply
    mapply
    """
    return f(*args, **kwargs)
def mapall(funcs, seq):
    """
    Parameters
    ----------
    funcs : iterable[function]
        Sequence of functions to map over `seq`.
    seq : iterable
        Sequence over which to map funcs.

    Yields
    ------
    elem : object
        Concatenated result of mapping each ``func`` over ``seq``.

    Examples
    --------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for func in funcs:
        for elem in seq:
            yield func(elem)
def same(*values):
    """
    Check if all values in a sequence are equal.

    Returns True on empty sequences.

    Examples
    --------
    >>> same(1, 1, 1, 1)
    True
    >>> same(1, 2, 1)
    False
    >>> same()
    True
    """
    if not values:
        return True
    first, rest = values[0], values[1:]
    return all(value == first for value in rest)
def dzip_exact(*dicts):
    """
    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts, and whose
        values are tuples of length len(dicts) containing the result of
        looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Examples
    --------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
def _gen_unzip(it, elem_len):
    """Helper for unzip which checks the lengths of each element in it.

    Parameters
    ----------
    it : iterable[tuple]
        An iterable of tuples. ``unzip`` should ensure that these are already
        tuples.
    elem_len : int or None
        The expected element length. If this is None it is inferred from the
        length of the first element.

    Yields
    ------
    elem : tuple
        Each element of ``it``.

    Raises
    ------
    ValueError
        Raised when the lengths do not match the ``elem_len``.
    """
    elem = next(it)
    first_elem_len = len(elem)

    if elem_len is not None and elem_len != first_elem_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                first_elem_len,
                elem_len,
            )
        )
    else:
        elem_len = first_elem_len

    yield elem
    for n, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    n,
                    len(elem),
                    elem_len,
                ),
            )
        yield elem
def unzip(seq, elem_len=None):
    """Unzip a length n sequence of length m sequences into m separate length
    n sequences.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip.
    elem_len : int, optional
        The expected length of each element of ``seq``. If not provided this
        will be inferred from the length of the first element of ``seq``.
        This can be used to ensure that code like: ``a, b = unzip(seq)``
        does not fail even when ``seq`` is empty.

    Returns
    -------
    seqs : iterable[iterable]
        The new sequences pulled out of the first iterable.

    Raises
    ------
    ValueError
        Raised when ``seq`` is empty and ``elem_len`` is not provided.
        Raised when elements of ``seq`` do not match the given ``elem_len``
        or the length of the first element of ``seq``.

    Examples
    --------
    >>> seq = [('a', 1), ('b', 2), ('c', 3)]
    >>> cs, ns = unzip(seq)
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)

    # checks that the elements are the same length
    >>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
    >>> cs, ns = unzip(seq)
    Traceback (most recent call last):
        ...
    ValueError: element at index 2 was length 3, expected 2

    # allows an explicit element length instead of inferring
    >>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
    >>> cs, ns = unzip(seq, 2)
    Traceback (most recent call last):
        ...
    ValueError: element at index 0 was length 3, expected 2

    # handles empty sequences when a length is given
    >>> cs, ns = unzip([], elem_len=2)
    >>> cs == ns == ()
    True

    Notes
    -----
    This function will force ``seq`` to completion.
    """
    ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
    if ret:
        return ret

    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")
    return ((),) * elem_len
def getattrs(value, attrs, default=_no_default):
    """
    Perform a chained application of ``getattr`` on ``value`` with the values
    in ``attrs``.

    If ``default`` is supplied, return it if any of the attribute lookups
    fail.

    Parameters
    ----------
    value : object
        Root of the lookup chain.
    attrs : iterable[str]
        Sequence of attributes to look up.
    default : object, optional
        Value to return if any of the lookups fail.

    Returns
    -------
    result : object
        Result of the lookup sequence.

    Examples
    --------
    >>> class EmptyObject(object):
    ...     pass
    ...
    >>> obj = EmptyObject()
    >>> obj.foo = EmptyObject()
    >>> obj.foo.bar = "value"
    >>> getattrs(obj, ('foo', 'bar'))
    'value'

    >>> getattrs(obj, ('foo', 'buzz'))
    Traceback (most recent call last):
        ...
    AttributeError: 'EmptyObject' object has no attribute 'buzz'

    >>> getattrs(obj, ('foo', 'buzz'), 'default')
    'default'
    """
    try:
        for attr in attrs:
            value = getattr(value, attr)
    except AttributeError:
        if default is _no_default:
            raise
        value = default
    return value
def set_attribute(name, value):
    """
    Decorator factory for setting attributes on a function.

    Doesn't change the behavior of the wrapped function.

    Examples
    --------
    >>> @set_attribute('__name__', 'foo')
    ... def bar():
    ...     return 3
    ...
    >>> bar()
    3
    >>> bar.__name__
    'foo'
    """
    def decorator(f):
        setattr(f, name, value)
        return f
    return decorator
def foldr(f, seq, default=_no_default):
    """Fold a function over a sequence with right associativity.

    Parameters
    ----------
    f : callable[any, any]
        The function to reduce the sequence with.
        The first argument will be the element of the sequence; the second
        argument will be the accumulator.
    seq : iterable[any]
        The sequence to reduce.
    default : any, optional
        The starting value to reduce with. If not provided, the sequence
        cannot be empty, and the last value of the sequence will be used.

    Returns
    -------
    folded : any
        The folded value.

    Notes
    -----
    This function works by reducing the list in a right associative way.

    For example, imagine we are folding with ``operator.add`` or ``+``:

    .. code-block:: python

       foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))

    In the more general case with an arbitrary function, ``foldr`` will expand
    like so:

    .. code-block:: python

       foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))

    For a more in depth discussion of left and right folds, see:
    `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
    The images in that page are very good for showing the differences between
    ``foldr`` and ``foldl`` (``reduce``).

    .. note::

       For performance reasons it is best to pass a strict (non-lazy)
       sequence, for example, a list.

    See Also
    --------
    :func:`functools.reduce`
    :func:`sum`
    """
    return reduce(
        flip(f),
        reversed(seq),
        *(default,) if default is not _no_default else ()
    )
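A usage sketch; ``flip`` (also used by ``alias`` above) is assumed to swap
the two arguments of ``f`` so the sequence element comes first:

from operator import add, sub

print(foldr(sub, [1, 2, 3], 0))     # 1 - (2 - (3 - 0)) == 2
print(foldr(add, ['a', 'b', 'c']))  # 'abc'; 'c' seeds the fold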
def invert(d):
    """
    Invert a dictionary into a dictionary of sets.

    >>> invert({'a': 1, 'b': 2, 'c': 1})  # doctest: +SKIP
    {1: {'a', 'c'}, 2: {'b'}}
    """
    out = {}
    for k, v in iteritems(d):
        try:
            out[v].add(k)
        except KeyError:
            out[v] = {k}
    return out
def keysorted(d):
    """Get the items from a dict, sorted by key.

    Example
    -------
    >>> keysorted({'c': 1, 'b': 2, 'a': 3})
    [('a', 3), ('b', 2), ('c', 1)]
    """
    return sorted(iteritems(d), key=itemgetter(0))
def verify_indices_all_unique(obj):
    """
    Check that all axes of a pandas object are unique.

    Parameters
    ----------
    obj : pd.Series / pd.DataFrame / pd.Panel
        The object to validate.

    Returns
    -------
    obj : pd.Series / pd.DataFrame / pd.Panel
        The validated object, unchanged.

    Raises
    ------
    ValueError
        If any axis has duplicate entries.
    """
    axis_names = [
        ('index',),                             # Series
        ('index', 'columns'),                   # DataFrame
        ('items', 'major_axis', 'minor_axis'),  # Panel
    ][obj.ndim - 1]  # ndim = 1 should go to entry 0.

    for axis_name, index in zip(axis_names, obj.axes):
        if index.is_unique:
            continue

        raise ValueError(
            "Duplicate entries in {type}.{axis}: {dupes}.".format(
                type=type(obj).__name__,
                axis=axis_name,
                dupes=sorted(index[index.duplicated()]),
            )
        )
    return obj
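For example:

import pandas as pd

ok = pd.Series([1, 2], index=['a', 'b'])
assert verify_indices_all_unique(ok) is ok  # returned unchanged

try:
    verify_indices_all_unique(pd.Series([1, 2], index=['a', 'a']))
except ValueError as e:
    print(e)  # Duplicate entries in Series.index: ['a'].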
def optionally(preprocessor):
    """Modify a preprocessor to explicitly allow `None`.

    Parameters
    ----------
    preprocessor : callable[callable, str, any -> any]
        A preprocessor to delegate to when `arg is not None`.

    Returns
    -------
    optional_preprocessor : callable[callable, str, any -> any]
        A preprocessor that delegates to `preprocessor` when
        `arg is not None`.

    Examples
    --------
    >>> def preprocessor(func, argname, arg):
    ...     if not isinstance(arg, int):
    ...         raise TypeError('arg must be int')
    ...     return arg
    ...
    >>> @preprocess(a=optionally(preprocessor))
    ... def f(a):
    ...     return a
    ...
    >>> f(1)  # call with int
    1
    >>> f('a')  # call with not int
    Traceback (most recent call last):
        ...
    TypeError: arg must be int
    >>> f(None) is None  # call with explicit None
    True
    """
    @wraps(preprocessor)
    def wrapper(func, argname, arg):
        return arg if arg is None else preprocessor(func, argname, arg)

    return wrapper
def ensure_dtype(func, argname, arg):
    """
    Argument preprocessor that converts the input into a numpy dtype.

    Examples
    --------
    >>> import numpy as np
    >>> from zipline.utils.preprocess import preprocess
    >>> @preprocess(dtype=ensure_dtype)
    ... def foo(dtype):
    ...     return dtype
    ...
    >>> foo(float)
    dtype('float64')
    """
    try:
        return dtype(arg)
    except TypeError:
        raise TypeError(
            "{func}() couldn't convert argument "
            "{argname}={arg!r} to a numpy dtype.".format(
                func=_qualified_name(func),
                argname=argname,
                arg=arg,
            ),
        )
def ensure_timezone(func, argname, arg):
    """Argument preprocessor that converts the input into a tzinfo object.

    Examples
    --------
    >>> from zipline.utils.preprocess import preprocess
    >>> @preprocess(tz=ensure_timezone)
    ... def foo(tz):
    ...     return tz
    >>> foo('utc')
    <UTC>
    """
    if isinstance(arg, tzinfo):
        return arg
    if isinstance(arg, string_types):
        return timezone(arg)

    raise TypeError(
        "{func}() couldn't convert argument "
        "{argname}={arg!r} to a timezone.".format(
            func=_qualified_name(func),
            argname=argname,
            arg=arg,
        ),
    )
def ensure_timestamp(func, argname, arg):
    """Argument preprocessor that converts the input into a pandas Timestamp
    object.

    Examples
    --------
    >>> from zipline.utils.preprocess import preprocess
    >>> @preprocess(ts=ensure_timestamp)
    ... def foo(ts):
    ...     return ts
    >>> foo('2014-01-01')
    Timestamp('2014-01-01 00:00:00')
    """
    try:
        return pd.Timestamp(arg)
    except ValueError as e:
        raise TypeError(
            "{func}() couldn't convert argument "
            "{argname}={arg!r} to a pandas Timestamp.\n"
            "Original error was: {t}: {e}".format(
                func=_qualified_name(func),
                argname=argname,
                arg=arg,
                t=_qualified_name(type(e)),
                e=e,
            ),
        )
def expect_dtypes(__funcname=_qualified_name, **named):
    """
    Preprocessing decorator that verifies inputs have expected numpy dtypes.

    Examples
    --------
    >>> from numpy import dtype, arange, int8, float64
    >>> @expect_dtypes(x=dtype(int8))
    ... def foo(x, y):
    ...     return x, y
    ...
    >>> foo(arange(3, dtype=int8), 'foo')
    (array([0, 1, 2], dtype=int8), 'foo')
    >>> foo(arange(3, dtype=float64), 'foo')  # doctest: +NORMALIZE_WHITESPACE
    ...                                       # doctest: +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
    but got 'float64' instead.
    """
    for name, type_ in iteritems(named):
        if not isinstance(type_, (dtype, tuple)):
            raise TypeError(
                "expect_dtypes() expected a numpy dtype or tuple of dtypes"
                " for argument {name!r}, but got {type_} instead.".format(
                    name=name, type_=type_,
                )
            )

    if isinstance(__funcname, str):
        def get_funcname(_):
            return __funcname
    else:
        get_funcname = __funcname

    @preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_dtype(dtypes):
        """
        Factory for dtype-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # name. Otherwise just show the value.
            try:
                value_to_show = value.dtype.name
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a value with dtype {dtype_str} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=get_funcname(func),
                dtype_str=' or '.join(repr(d.name) for d in dtypes),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattr(argvalue, 'dtype', object()) not in dtypes:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_dtype, named))
def expect_kinds(**named):
    """
    Preprocessing decorator that verifies inputs have expected dtype kinds.

    Examples
    --------
    >>> from numpy import int64, int32, float32
    >>> @expect_kinds(x='i')
    ... def foo(x):
    ...     return x
    ...
    >>> foo(int64(2))
    2
    >>> foo(int32(2))
    2
    >>> foo(float32(2))  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
    but got 'f' instead.
    """
    for name, kind in iteritems(named):
        if not isinstance(kind, (str, tuple)):
            raise TypeError(
                "expect_kinds() expected a string or tuple of strings"
                " for argument {name!r}, but got {kind} instead.".format(
                    name=name, kind=kind,
                )
            )

    @preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_kind(kinds):
        """
        Factory for kind-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # kind. Otherwise just show the value.
            try:
                value_to_show = value.dtype.kind
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a numpy object of kind {kinds} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=_qualified_name(func),
                kinds=' or '.join(map(repr, kinds)),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_kind, named))
def expect_types(__funcname=_qualified_name, **named):
    """
    Preprocessing decorator that verifies inputs have expected types.

    Examples
    --------
    >>> @expect_types(x=int, y=str)
    ... def foo(x, y):
    ...     return x, y
    ...
    >>> foo(2, '3')
    (2, '3')
    >>> foo(2.0, '3')  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a value of type int for argument 'x',
    but got float instead.

    Notes
    -----
    A special argument, __funcname, can be provided as a string to override
    the function name shown in error messages. This is most often used on
    __init__ or __new__ methods to make errors refer to the class name
    instead of the function name.
    """
    for name, type_ in iteritems(named):
        if not isinstance(type_, (type, tuple)):
            raise TypeError(
                "expect_types() expected a type or tuple of types for "
                "argument '{name}', but got {type_} instead.".format(
                    name=name, type_=type_,
                )
            )

    def _expect_type(type_):
        # Slightly different messages for type and tuple of types.
        _template = (
            "%(funcname)s() expected a value of type {type_or_types} "
            "for argument '%(argname)s', but got %(actual)s instead."
        )
        if isinstance(type_, tuple):
            template = _template.format(
                type_or_types=' or '.join(map(_qualified_name, type_))
            )
        else:
            template = _template.format(type_or_types=_qualified_name(type_))

        return make_check(
            exc_type=TypeError,
            template=template,
            pred=lambda v: not isinstance(v, type_),
            actual=compose(_qualified_name, type),
            funcname=__funcname,
        )

    return preprocess(**valmap(_expect_type, named))
Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name.
def make_check(exc_type, template, pred, actual, funcname): """ Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name. """ if isinstance(funcname, str): def get_funcname(_): return funcname else: get_funcname = funcname def _check(func, argname, argvalue): if pred(argvalue): raise exc_type( template % { 'funcname': get_funcname(func), 'argname': argname, 'actual': actual(argvalue), }, ) return argvalue return _check
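``make_check`` is the shared factory behind the ``expect_*`` decorators above. As a hedged illustration of wiring it together with ``preprocess`` (the decorator name ``expect_positive`` and the function ``foo`` below are hypothetical, not part of this module):

# Hypothetical example: build an @expect_positive decorator on top of
# make_check. valmap/preprocess are used exactly as in expect_types above.
def expect_positive(**named):
    def _expect_positive(_flag):
        return make_check(
            exc_type=ValueError,
            template=(
                "%(funcname)s() expected a positive value for argument"
                " '%(argname)s', but got %(actual)s instead."
            ),
            pred=lambda v: v <= 0,  # fail when the value is not positive
            actual=repr,
            funcname=_qualified_name,
        )
    return preprocess(**valmap(_expect_positive, named))

@expect_positive(x=True)
def foo(x):
    return x

foo(1)   # returns 1
foo(-1)  # raises ValueError: foo() expected a positive value ...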
Helper for use with `expect_types` when an input can be `type_` or `None`. Returns an object such that both `None` and instances of `type_` pass checks of the form `isinstance(obj, optional(type_))`. Parameters ---------- type_ : type Type for which to produce an option. Examples -------- >>> isinstance({}, optional(dict)) True >>> isinstance(None, optional(dict)) True >>> isinstance(1, optional(dict)) False
def optional(type_): """ Helper for use with `expect_types` when an input can be `type_` or `None`. Returns an object such that both `None` and instances of `type_` pass checks of the form `isinstance(obj, optional(type_))`. Parameters ---------- type_ : type Type for which to produce an option. Examples -------- >>> isinstance({}, optional(dict)) True >>> isinstance(None, optional(dict)) True >>> isinstance(1, optional(dict)) False """ return (type_, type(None))
Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol.
def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """ def _expect_element(collection): if isinstance(collection, (set, frozenset)): # Special case the error message for set and frozen set to make it # less verbose. collection_for_error_message = tuple(sorted(collection)) else: collection_for_error_message = collection template = ( "%(funcname)s() expected a value in {collection} " "for argument '%(argname)s', but got %(actual)s instead." ).format(collection=collection_for_error_message) return make_check( ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname, ) return preprocess(**valmap(_expect_element, named))
Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead.
def expect_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value > upper predicate_descr = "less than or equal to " + str(upper) elif upper is None: def should_fail(value): return value < lower predicate_descr = "greater than or equal to " + str(lower) else: def should_fail(value): return not (lower <= value <= upper) predicate_descr = "inclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
Preprocessing decorator verifying that inputs fall EXCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_strictly_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(2) 3 >>> foo(4) 5 >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value exclusively between 1 and 5 for argument 'x', but got 5 instead. >>> @expect_strictly_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly greater than 2 for argument 'x', but got 2 instead. >>> @expect_strictly_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly less than 5 for argument 'x', but got 5 instead.
def expect_strictly_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall EXCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_strictly_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(2) 3 >>> foo(4) 5 >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value exclusively between 1 and 5 for argument 'x', but got 5 instead. >>> @expect_strictly_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly greater than 2 for argument 'x', but got 2 instead. >>> @expect_strictly_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly less than 5 for argument 'x', but got 5 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value >= upper predicate_descr = "strictly less than " + str(upper) elif upper is None: def should_fail(value): return value <= lower predicate_descr = "strictly greater than " + str(lower) else: def should_fail(value): return not (lower < value < upper) predicate_descr = "exclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
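Both ``expect_bounded`` and ``expect_strictly_bounded`` delegate to a helper, ``_expect_bounded``, that is not shown in this excerpt. A minimal sketch of what that helper must do, reconstructed from the call sites above (the bounds validation and error wording are assumptions):

# Sketch of the missing `_expect_bounded` helper: validate each bounds
# pair, then hand it to the check factory and build the preprocessor map.
def _expect_bounded(make_bounded_check, __funcname, **named):
    def valid_bounds(t):
        return (
            isinstance(t, tuple)
            and len(t) == 2
            and t != (None, None)
        )

    for name, bounds in iteritems(named):
        if not valid_bounds(bounds):
            raise TypeError(
                "expect_bounded() expected a tuple of bounds for"
                " argument '{name}', but got {bounds} instead.".format(
                    name=name, bounds=bounds,
                )
            )

    return preprocess(**valmap(make_bounded_check, named))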
Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead.
def expect_dimensions(__funcname=_qualified_name, **dimensions): """ Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead. """ if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname def _expect_dimension(expected_ndim): def _check(func, argname, argvalue): actual_ndim = argvalue.ndim if actual_ndim != expected_ndim: if actual_ndim == 0: actual_repr = 'scalar' else: actual_repr = "%d-D array" % actual_ndim raise ValueError( "{func}() expected a {expected:d}-D array" " for argument {argname!r}, but got a {actual}" " instead.".format( func=get_funcname(func), expected=expected_ndim, argname=argname, actual=actual_repr, ) ) return argvalue return _check return preprocess(**valmap(_expect_dimension, dimensions))
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.

Parameters
----------
from_ : type or tuple of types
    Input types on which to call ``to``.
to : function
    Coercion function to call on inputs.
**to_kwargs
    Additional keywords to forward to every call to ``to``.

Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
...     return x - y
...
>>> floordiff(3.2, 2.5)
1

>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
...     return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        if isinstance(arg, from_):
            return to(arg, **to_kwargs)
        return arg
    return preprocessor
Preprocessing decorator that applies type coercions.

Parameters
----------
**kwargs : dict[str -> (type, callable)]
    Keyword arguments mapping function parameter names to pairs of
    (from_type, coerce_function).

Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
...     return (x, y)
...
>>> func(1.0, 3)
(1, '3')
def coerce_types(**kwargs):
    """
    Preprocessing decorator that applies type coercions.

    Parameters
    ----------
    **kwargs : dict[str -> (type, callable)]
        Keyword arguments mapping function parameter names to pairs of
        (from_type, coerce_function).

    Examples
    --------
    >>> @coerce_types(x=(float, int), y=(int, str))
    ... def func(x, y):
    ...     return (x, y)
    ...
    >>> func(1.0, 3)
    (1, '3')
    """
    def _coerce(types):
        return coerce(*types)

    return preprocess(**valmap(_coerce, kwargs))
Validate that a dictionary has an expected set of keys.
def validate_keys(dict_, expected, funcname): """Validate that a dictionary has an expected set of keys. """ expected = set(expected) received = set(dict_) missing = expected - received if missing: raise ValueError( "Missing keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) ) unexpected = received - expected if unexpected: raise ValueError( "Unexpected keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) )
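A short usage sketch (the dict contents and the ``load_config`` name are hypothetical):

# Hypothetical config dict; 'load_config' is just the name shown in errors.
validate_keys({'start': '2014-01-01', 'end': '2014-12-31'},
              {'start', 'end'},
              'load_config')   # passes silently

validate_keys({'start': '2014-01-01'}, {'start', 'end'}, 'load_config')
# ValueError: Missing keys in load_config:
#   Expected Keys: ['end', 'start']
#   Received Keys: ['start']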
Check if a and b are equal with some tolerance. Parameters ---------- a, b : float The floats to check for equality. atol : float, optional The absolute tolerance. rtol : float, optional The relative tolerance. equal_nan : bool, optional Should NaN compare equal? See Also -------- numpy.isclose Notes ----- This function is just a scalar version of numpy.isclose for performance. See the docstring of ``isclose`` for more information about ``atol`` and ``rtol``.
def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False): """Check if a and b are equal with some tolerance. Parameters ---------- a, b : float The floats to check for equality. atol : float, optional The absolute tolerance. rtol : float, optional The relative tolerance. equal_nan : bool, optional Should NaN compare equal? See Also -------- numpy.isclose Notes ----- This function is just a scalar version of numpy.isclose for performance. See the docstring of ``isclose`` for more information about ``atol`` and ``rtol``. """ if equal_nan and isnan(a) and isnan(b): return True return math.fabs(a - b) <= (atol + rtol * math.fabs(b))
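A few illustrative calls (values chosen arbitrarily):

# With the defaults, the tolerance is atol + rtol * |b| = 1e-6 + 1e-6 * |b|.
tolerant_equals(1.0, 1.0 + 1e-9)   # True: difference is within tolerance
tolerant_equals(1.0, 1.01)         # False: difference exceeds tolerance
tolerant_equals(float('nan'), float('nan'))                  # False
tolerant_equals(float('nan'), float('nan'), equal_nan=True)  # True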
Round a to the nearest integer if that integer is within an epsilon of a.
def round_if_near_integer(a, epsilon=1e-4): """ Round a to the nearest integer if that integer is within an epsilon of a. """ if abs(a - round(a)) <= epsilon: return round(a) else: return a
Compute the number of decimal places in a number. Examples -------- >>> number_of_decimal_places(1) 0 >>> number_of_decimal_places(3.14) 2 >>> number_of_decimal_places('3.14') 2
def number_of_decimal_places(n): """ Compute the number of decimal places in a number. Examples -------- >>> number_of_decimal_places(1) 0 >>> number_of_decimal_places(3.14) 2 >>> number_of_decimal_places('3.14') 2 """ decimal = Decimal(str(n)) return -decimal.as_tuple().exponent
Users should only access the lru_cache through its public API: cache_info, cache_clear The internals of the lru_cache are encapsulated for thread safety and to allow the implementation to change.
def _weak_lru_cache(maxsize=100): """ Users should only access the lru_cache through its public API: cache_info, cache_clear The internals of the lru_cache are encapsulated for thread safety and to allow the implementation to change. """ def decorating_function( user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError): hits, misses = [0], [0] kwd_mark = (object(),) # separates positional and keyword args lock = Lock() # needed because OrderedDict isn't threadsafe if maxsize is None: cache = _WeakArgsDict() # cache without ordering or size limit @wraps(user_function) def wrapper(*args, **kwds): key = args if kwds: key += kwd_mark + tuple(sorted(kwds.items())) try: result = cache[key] hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) cache[key] = result misses[0] += 1 return result else: # ordered least recent to most recent cache = _WeakArgsOrderedDict() cache_popitem = cache.popitem cache_renew = cache.move_to_end @wraps(user_function) def wrapper(*args, **kwds): key = args if kwds: key += kwd_mark + tuple(sorted(kwds.items())) with lock: try: result = cache[key] cache_renew(key) # record recent use of this key hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) with lock: cache[key] = result # record recent use of this key misses[0] += 1 if len(cache) > maxsize: # purge least recently used cache entry cache_popitem(False) return result def cache_info(): """Report cache statistics""" with lock: return hits[0], misses[0], maxsize, len(cache) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() hits[0] = misses[0] = 0 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return wrapper return decorating_function
Weak least-recently-used cache decorator.

If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.

Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.

View the cache statistics (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().

See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
def weak_lru_cache(maxsize=100):
    """Weak least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    Arguments to the cached function must be hashable. Any that are weak-
    referenceable will be stored by weak reference. Once any of the args have
    been garbage collected, the entry will be removed from the cache.

    View the cache statistics (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    class desc(lazyval):
        def __get__(self, instance, owner):
            if instance is None:
                return self
            try:
                return self._cache[instance]
            except KeyError:
                inst = ref(instance)

                @_weak_lru_cache(maxsize)
                @wraps(self._get)
                def wrapper(*args, **kwargs):
                    return self._get(inst(), *args, **kwargs)

                self._cache[instance] = wrapper
                return wrapper

        @_weak_lru_cache(maxsize)
        def __call__(self, *args, **kwargs):
            return self._get(*args, **kwargs)

    return desc
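A usage sketch, assuming ``lazyval`` stores the wrapped function as ``_get`` and keeps a per-instance ``_cache`` (the class and method names below are hypothetical):

# Hypothetical class; `exposure` results are cached per instance, and the
# cache holds `self` only weakly, so collecting a Portfolio also frees
# its cached values.
class Portfolio(object):
    @weak_lru_cache(maxsize=32)
    def exposure(self, sid):
        print('computing %s' % sid)
        return sid * 2.0

p = Portfolio()
p.exposure(1)   # prints 'computing 1', returns 2.0
p.exposure(1)   # served from the per-instance cache; nothing printed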
Compose multiple classes together.

Parameters
----------
*mcls : tuple[type]
    The classes that you would like to compose

Returns
-------
cls : type
    A type that subclasses all of the types in ``mcls``.

Notes
-----
A common use case for this is to build composed metaclasses, for example,
imagine you have some simple metaclass ``M`` and some instance of ``M``
named ``C`` like so:

.. code-block:: python

   >>> class M(type):
   ...     def __new__(mcls, name, bases, dict_):
   ...         dict_['ayy'] = 'lmao'
   ...         return super(M, mcls).__new__(mcls, name, bases, dict_)

   >>> from six import with_metaclass
   >>> class C(with_metaclass(M, object)):
   ...     pass

We now want to create a subclass of ``C`` that is also an abstract class.
We can use ``compose_types`` to create a new metaclass that is a subclass
of ``M`` and ``ABCMeta``. This is needed because a subclass of a class
with a metaclass must have a metaclass which is a subclass of the
metaclass of the superclass.

.. code-block:: python

   >>> from abc import ABCMeta, abstractmethod
   >>> class D(with_metaclass(compose_types(M, ABCMeta), C)):
   ...     @abstractmethod
   ...     def f(self):
   ...         raise NotImplementedError('f')

We can see that this class has both metaclasses applied to it:

.. code-block:: python

   >>> D.ayy
   'lmao'
   >>> D()
   Traceback (most recent call last):
      ...
   TypeError: Can't instantiate abstract class D with abstract methods f

An important note here is that ``M`` did not use ``type.__new__`` and
instead used ``super()``. This is to support cooperative multiple
inheritance, which is needed for ``compose_types`` to work as intended.
After we have composed these types, ``M.__new__``'s super will actually
go to ``ABCMeta.__new__`` and not ``type.__new__``.

Always using ``super()`` to dispatch to your superclass is best practice
anyway, so most classes should compose without much special consideration.
def compose_types(a, *cs):
    """Compose multiple classes together.

    Parameters
    ----------
    *mcls : tuple[type]
        The classes that you would like to compose

    Returns
    -------
    cls : type
        A type that subclasses all of the types in ``mcls``.

    Notes
    -----
    A common use case for this is to build composed metaclasses, for example,
    imagine you have some simple metaclass ``M`` and some instance of ``M``
    named ``C`` like so:

    .. code-block:: python

       >>> class M(type):
       ...     def __new__(mcls, name, bases, dict_):
       ...         dict_['ayy'] = 'lmao'
       ...         return super(M, mcls).__new__(mcls, name, bases, dict_)

       >>> from six import with_metaclass
       >>> class C(with_metaclass(M, object)):
       ...     pass

    We now want to create a subclass of ``C`` that is also an abstract class.
    We can use ``compose_types`` to create a new metaclass that is a subclass
    of ``M`` and ``ABCMeta``. This is needed because a subclass of a class
    with a metaclass must have a metaclass which is a subclass of the
    metaclass of the superclass.

    .. code-block:: python

       >>> from abc import ABCMeta, abstractmethod
       >>> class D(with_metaclass(compose_types(M, ABCMeta), C)):
       ...     @abstractmethod
       ...     def f(self):
       ...         raise NotImplementedError('f')

    We can see that this class has both metaclasses applied to it:

    .. code-block:: python

       >>> D.ayy
       'lmao'
       >>> D()
       Traceback (most recent call last):
          ...
       TypeError: Can't instantiate abstract class D with abstract methods f

    An important note here is that ``M`` did not use ``type.__new__`` and
    instead used ``super()``. This is to support cooperative multiple
    inheritance, which is needed for ``compose_types`` to work as intended.
    After we have composed these types, ``M.__new__``'s super will actually
    go to ``ABCMeta.__new__`` and not ``type.__new__``.

    Always using ``super()`` to dispatch to your superclass is best practice
    anyway, so most classes should compose without much special consideration.
    """
    if not cs:
        # if there are no types to compose then just return the single type
        return a

    mcls = (a,) + cs
    return type(
        'compose_types(%s)' % ', '.join(map(attrgetter('__name__'), mcls)),
        mcls,
        {},
    )
Make a class inheriting from ``bases`` whose metaclass inherits from all of ``metaclasses``. Like :func:`six.with_metaclass`, but allows multiple metaclasses. Parameters ---------- metaclasses : iterable[type] A tuple of types to use as metaclasses. *bases : tuple[type] A tuple of types to use as bases. Returns ------- base : type A subtype of ``bases`` whose metaclass is a subtype of ``metaclasses``. Notes ----- The metaclasses must be written to support cooperative multiple inheritance. This means that they must delegate all calls to ``super()`` instead of inlining their super class by name.
def with_metaclasses(metaclasses, *bases): """Make a class inheriting from ``bases`` whose metaclass inherits from all of ``metaclasses``. Like :func:`six.with_metaclass`, but allows multiple metaclasses. Parameters ---------- metaclasses : iterable[type] A tuple of types to use as metaclasses. *bases : tuple[type] A tuple of types to use as bases. Returns ------- base : type A subtype of ``bases`` whose metaclass is a subtype of ``metaclasses``. Notes ----- The metaclasses must be written to support cooperative multiple inheritance. This means that they must delegate all calls to ``super()`` instead of inlining their super class by name. """ return six.with_metaclass(compose_types(*metaclasses), *bases)
Retrieve NaT with the same units as ``dtype``.

Parameters
----------
dtype : dtype-coercible
    The dtype to look up the NaT value for.

Returns
-------
NaT : dtype
    The NaT value for the given dtype.
def NaT_for_dtype(dtype):
    """Retrieve NaT with the same units as ``dtype``.

    Parameters
    ----------
    dtype : dtype-coercible
        The dtype to look up the NaT value for.

    Returns
    -------
    NaT : dtype
        The NaT value for the given dtype.
    """
    return NaTmap[np.dtype(dtype)]
Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta).
def make_kind_check(python_types, numpy_kind): """ Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta). """ def check(value): if hasattr(value, 'dtype'): return value.dtype.kind == numpy_kind return isinstance(value, python_types) return check
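The kind predicates used later in this module (``is_float``, ``is_datetime``, ``is_object``) are presumably built with this factory; a plausible set of definitions follows (the exact ``python_types`` in the real module may differ, e.g. ``long`` on Python 2):

import datetime
import numpy as np

# Plausible definitions of the predicates used by `is_missing` and `same`.
is_float = make_kind_check(float, 'f')
is_int = make_kind_check(int, 'i')
is_datetime = make_kind_check(datetime.datetime, 'M')
is_object = make_kind_check(object, 'O')

is_float(1.5)                             # True: plain python float
is_float(np.array([1.5]))                 # True: dtype kind is 'f'
is_datetime(np.datetime64('2014-01-01'))  # True: dtype kind is 'M'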
Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
def coerce_to_dtype(dtype, value): """ Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. """ name = dtype.name if name.startswith('datetime64'): if name == 'datetime64[D]': return make_datetime64D(value) elif name == 'datetime64[ns]': return make_datetime64ns(value) else: raise TypeError( "Don't know how to coerce values of dtype %s" % dtype ) return dtype.type(value)
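A few illustrative calls:

import numpy as np

# Plain dtypes go through dtype.type:
coerce_to_dtype(np.dtype('float64'), '3.14')   # -> np.float64(3.14)
# datetime64[ns] values are routed through make_datetime64ns:
coerce_to_dtype(np.dtype('datetime64[ns]'), '2014-01-01')
# Any other datetime64 unit raises:
# coerce_to_dtype(np.dtype('datetime64[s]'), 0)  # TypeError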
Get the default fill value for `dtype`.
def default_missing_value_for_dtype(dtype): """ Get the default fill value for `dtype`. """ try: return _FILLVALUE_DEFAULTS[dtype] except KeyError: raise NoDefaultMissingValue( "No default value registered for dtype %s." % dtype )
Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis
def repeat_first_axis(array, count): """ Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """ return as_strided(array, (count,) + array.shape, (0,) + array.strides)
Restride `array` to repeat `count` times along the last axis.

Parameters
----------
array : np.array
    The array to restride.
count : int
    Number of times to repeat `array`.

Returns
-------
result : array
    Array of shape array.shape + (count,) composed of `array` repeated
    `count` times along the last axis.

Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
       [1, 1],
       [2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
       [1, 1, 1, 1],
       [2, 2, 2, 2]])

Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.

See Also
--------
repeat_first_axis
def repeat_last_axis(array, count):
    """
    Restride `array` to repeat `count` times along the last axis.

    Parameters
    ----------
    array : np.array
        The array to restride.
    count : int
        Number of times to repeat `array`.

    Returns
    -------
    result : array
        Array of shape array.shape + (count,) composed of `array` repeated
        `count` times along the last axis.

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(3); a
    array([0, 1, 2])
    >>> repeat_last_axis(a, 2)
    array([[0, 0],
           [1, 1],
           [2, 2]])
    >>> repeat_last_axis(a, 4)
    array([[0, 0, 0, 0],
           [1, 1, 1, 1],
           [2, 2, 2, 2]])

    Notes
    -----
    The resulting array will share memory with `array`. If you need to assign
    to the input or output, you should probably make a copy first.

    See Also
    --------
    repeat_first_axis
    """
    return as_strided(array, array.shape + (count,), array.strides + (0,))
Restride an array of shape

    (X_0, ... X_N)

into an array of shape

    (X_0 - length + 1, length, X_1, ... X_N)

where each slice at index i along the first axis is equivalent to

    result[i] = array[i:length + i]

Parameters
----------
array : np.ndarray
    The base array.
length : int
    Length of the synthetic first axis to generate.

Returns
-------
out : np.ndarray

Example
-------
>>> from numpy import arange
>>> a = arange(25).reshape(5, 5)
>>> a
array([[ 0,  1,  2,  3,  4],
       [ 5,  6,  7,  8,  9],
       [10, 11, 12, 13, 14],
       [15, 16, 17, 18, 19],
       [20, 21, 22, 23, 24]])

>>> rolling_window(a, 2)
array([[[ 0,  1,  2,  3,  4],
        [ 5,  6,  7,  8,  9]],
<BLANKLINE>
       [[ 5,  6,  7,  8,  9],
        [10, 11, 12, 13, 14]],
<BLANKLINE>
       [[10, 11, 12, 13, 14],
        [15, 16, 17, 18, 19]],
<BLANKLINE>
       [[15, 16, 17, 18, 19],
        [20, 21, 22, 23, 24]]])
def rolling_window(array, length):
    """
    Restride an array of shape

        (X_0, ... X_N)

    into an array of shape

        (X_0 - length + 1, length, X_1, ... X_N)

    where each slice at index i along the first axis is equivalent to

        result[i] = array[i:length + i]

    Parameters
    ----------
    array : np.ndarray
        The base array.
    length : int
        Length of the synthetic first axis to generate.

    Returns
    -------
    out : np.ndarray

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(25).reshape(5, 5)
    >>> a
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19],
           [20, 21, 22, 23, 24]])

    >>> rolling_window(a, 2)
    array([[[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9]],
    <BLANKLINE>
           [[ 5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14]],
    <BLANKLINE>
           [[10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19]],
    <BLANKLINE>
           [[15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24]]])
    """
    orig_shape = array.shape
    if not orig_shape:
        raise IndexError("Can't restride a scalar.")
    elif orig_shape[0] <= length:
        raise IndexError(
            "Can't restride array of shape {shape} with"
            " a window length of {len}".format(
                shape=orig_shape,
                len=length,
            )
        )

    num_windows = (orig_shape[0] - length + 1)
    new_shape = (num_windows, length) + orig_shape[1:]

    new_strides = (array.strides[0],) + array.strides

    return as_strided(array, new_shape, new_strides)
Check if a value is np.NaT.
def isnat(obj):
    """
    Check if a value is np.NaT.
    """
    if obj.dtype.kind not in ('m', 'M'):
        raise ValueError("%s is not a numpy datetime or timedelta" % obj)
    return obj.view(int64_dtype) == iNaT
Generic is_missing function that handles NaN and NaT.
def is_missing(data, missing_value):
    """
    Generic is_missing function that handles NaN and NaT.
    """
    if is_float(data) and isnan(missing_value):
        return isnan(data)
    elif is_datetime(data) and isnat(missing_value):
        return isnat(data)
    elif is_object(data) and missing_value is None:
        # XXX: Older versions of numpy return True/False for array == None.
        # Work around this by boxing None in a 1x1 array, which causes numpy
        # to do the broadcasted comparison we want.
        return data == np.array([missing_value])
    return (data == missing_value)
Check if two scalar values are "the same". Returns True if `x == y`, or if x and y are both NaN or both NaT.
def same(x, y): """ Check if two scalar values are "the same". Returns True if `x == y`, or if x and y are both NaN or both NaT. """ if is_float(x) and isnan(x) and is_float(y) and isnan(y): return True elif is_datetime(x) and isnat(x) and is_datetime(y) and isnat(y): return True else: return x == y
Simple wrapper around numpy.busday_count that returns `float` arrays rather
than int arrays, and handles `NaT`s by returning `NaN`s where the inputs
were `NaT`.

Doesn't support custom weekdays or calendars, but probably should in the
future.

See Also
--------
np.busday_count
def busday_count_mask_NaT(begindates, enddates, out=None):
    """
    Simple wrapper around numpy.busday_count that returns `float` arrays
    rather than int arrays, and handles `NaT`s by returning `NaN`s where the
    inputs were `NaT`.

    Doesn't support custom weekdays or calendars, but probably should in the
    future.

    See Also
    --------
    np.busday_count
    """
    if out is None:
        out = empty(broadcast(begindates, enddates).shape, dtype=float)

    beginmask = isnat(begindates)
    endmask = isnat(enddates)

    out = busday_count(
        # Temporarily fill in non-NaT values.
        where(beginmask, _notNaT, begindates),
        where(endmask, _notNaT, enddates),
        out=out,
    )

    # Fill in entries where either comparison was NaT with nan in the output.
    out[beginmask | endmask] = nan
    return out
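A small illustration (dates chosen arbitrarily):

import numpy as np

begin = np.array(['2014-01-06', 'NaT'], dtype='datetime64[D]')
end = np.array(['2014-01-13', '2014-01-13'], dtype='datetime64[D]')

busday_count_mask_NaT(begin, end)
# -> array([ 5., nan]): five business days from Mon 2014-01-06 up to (but
#    not including) Mon 2014-01-13, and NaN where a begin date was NaT.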
Helper for building a WarningContext that ignores warnings from numpy's nanfunctions.
def ignore_nanwarnings(): """ Helper for building a WarningContext that ignores warnings from numpy's nanfunctions. """ return WarningContext( ( ('ignore',), {'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'}, ) )
Check if each element of ``array`` is in choices. Parameters ---------- array : np.ndarray choices : object Object implementing __contains__. Returns ------- was_element : np.ndarray[bool] Array indicating whether each element of ``array`` was in ``choices``.
def vectorized_is_element(array, choices): """ Check if each element of ``array`` is in choices. Parameters ---------- array : np.ndarray choices : object Object implementing __contains__. Returns ------- was_element : np.ndarray[bool] Array indicating whether each element of ``array`` was in ``choices``. """ return vectorize(choices.__contains__, otypes=[bool])(array)
Convert an array of shape (N,) into an array of shape (N, 1). This is equivalent to `a[:, np.newaxis]`. Parameters ---------- a : np.ndarray Example ------- >>> import numpy as np >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> as_column(a) array([[0], [1], [2], [3], [4]]) >>> as_column(a).shape (5, 1)
def as_column(a):
    """
    Convert an array of shape (N,) into an array of shape (N, 1).

    This is equivalent to `a[:, np.newaxis]`.

    Parameters
    ----------
    a : np.ndarray

    Example
    -------
    >>> import numpy as np
    >>> a = np.arange(5)
    >>> a
    array([0, 1, 2, 3, 4])
    >>> as_column(a)
    array([[0],
           [1],
           [2],
           [3],
           [4]])
    >>> as_column(a).shape
    (5, 1)
    """
    if a.ndim != 1:
        raise ValueError(
            "as_column expected a 1-dimensional array, "
            "but got an array of shape %s" % (a.shape,)
        )
    return a[:, None]
Compute indices of values in ``a`` that differ from the previous value.

Parameters
----------
a : np.ndarray
    The array on which to compute indices of change.
include_first : bool
    Whether or not to consider the first index of the array as "changed".

Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])

>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
def changed_locations(a, include_first):
    """
    Compute indices of values in ``a`` that differ from the previous value.

    Parameters
    ----------
    a : np.ndarray
        The array on which to compute indices of change.
    include_first : bool
        Whether or not to consider the first index of the array as "changed".

    Example
    -------
    >>> import numpy as np
    >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
    array([2, 4])

    >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
    array([0, 2, 4])
    """
    if a.ndim > 1:
        raise ValueError("changed_locations only supports 1D arrays.")
    indices = flatnonzero(diff(a)) + 1

    if not include_first:
        return indices

    return hstack([[0], indices])
Compare datetime64 ndarrays, treating NaT values as equal.
def compare_datetime_arrays(x, y): """ Compare datetime64 ndarrays, treating NaT values as equal. """ return array_equal(x.view('int64'), y.view('int64'))
Convert an array of dtype S to an object array containing `str`.
def bytes_array_to_native_str_object_array(a): """Convert an array of dtype S to an object array containing `str`. """ if six.PY2: return a.astype(object) else: return a.astype(str).astype(object)
Take a DataFrame and return a triple of (df.index, df.columns, df.values)
def explode(df): """ Take a DataFrame and return a triple of (df.index, df.columns, df.values) """ return df.index, df.columns, df.values
Convert a time into microseconds since midnight.

Parameters
----------
time : datetime.time
    The time to convert.

Returns
-------
us : int
    The number of microseconds since midnight.

Notes
-----
This does not account for leap seconds or daylight saving time.
def _time_to_micros(time):
    """Convert a time into microseconds since midnight.

    Parameters
    ----------
    time : datetime.time
        The time to convert.

    Returns
    -------
    us : int
        The number of microseconds since midnight.

    Notes
    -----
    This does not account for leap seconds or daylight saving time.
    """
    seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
    return 1000000 * seconds + time.microsecond
Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time`
def mask_between_time(dts, start, end, include_start=True, include_end=True): """Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time` """ # This function is adapted from # `pandas.Datetime.Index.indexer_between_time` which was originally # written by Wes McKinney, Chang She, and Grant Roch. time_micros = dts._get_time_micros() start_micros = _time_to_micros(start) end_micros = _time_to_micros(end) left_op, right_op, join_op = _opmap[ bool(include_start), bool(include_end), start_micros <= end_micros, ] return join_op( left_op(start_micros, time_micros), right_op(time_micros, end_micros), )
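The ``_opmap`` lookup table referenced above is not shown in this excerpt. A plausible reconstruction, keyed on ``(include_start, include_end, start <= end)`` as the call site requires: when the window wraps past midnight (``start > end``), the two comparisons are joined with logical-or instead of logical-and:

import operator as op
from itertools import product

import numpy as np

# Plausible reconstruction of `_opmap`: for each (include_start,
# include_end, no_wrap) triple, pick inclusive/exclusive comparisons and
# the join function. In the wrap-around case a time matches if it is
# after `start` OR before `end`.
_opmap = {
    (include_start, include_end, no_wrap): (
        op.le if include_start else op.lt,
        op.le if include_end else op.lt,
        np.logical_and if no_wrap else np.logical_or,
    )
    for include_start, include_end, no_wrap in product((True, False),
                                                       repeat=3)
}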
Find the index of ``dt`` in ``dts``.

This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.

Parameters
----------
dts : pd.DatetimeIndex
    Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
    ``dt`` to be looked up.

Returns
-------
ix : int
    Integer index such that dts[ix] == dt.

Raises
------
LookupError
    If dt is not in ``dts``.
def find_in_sorted_index(dts, dt):
    """
    Find the index of ``dt`` in ``dts``.

    This function should be used instead of `dts.get_loc(dt)` if the index is
    large enough that we don't want to initialize a hash table in ``dts``. In
    particular, this should always be used on minutely trading calendars.

    Parameters
    ----------
    dts : pd.DatetimeIndex
        Index in which to look up ``dt``. **Must be sorted**.
    dt : pd.Timestamp
        ``dt`` to be looked up.

    Returns
    -------
    ix : int
        Integer index such that dts[ix] == dt.

    Raises
    ------
    LookupError
        If dt is not in ``dts``.
    """
    ix = dts.searchsorted(dt)
    if ix == len(dts) or dts[ix] != dt:
        raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
    return ix
Find values in ``dts`` closest but not equal to ``dt``.

Returns a pair of (last_before, first_after).

When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater than any element in ``dts``, ``first_after`` is
None.

``dts`` must be unique and sorted in increasing order.

Parameters
----------
dts : pd.DatetimeIndex
    Dates in which to search.
dt : pd.Timestamp
    Date for which to find bounds.
def nearest_unequal_elements(dts, dt):
    """
    Find values in ``dts`` closest but not equal to ``dt``.

    Returns a pair of (last_before, first_after).

    When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
    When ``dt`` is greater than any element in ``dts``, ``first_after`` is
    None.

    ``dts`` must be unique and sorted in increasing order.

    Parameters
    ----------
    dts : pd.DatetimeIndex
        Dates in which to search.
    dt : pd.Timestamp
        Date for which to find bounds.
    """
    if not dts.is_unique:
        raise ValueError("dts must be unique")

    if not dts.is_monotonic_increasing:
        raise ValueError("dts must be sorted in increasing order")

    if not len(dts):
        return None, None

    sortpos = dts.searchsorted(dt, side='left')
    try:
        sortval = dts[sortpos]
    except IndexError:
        # dt is greater than any value in the array.
        return dts[-1], None

    if dt < sortval:
        lower_ix = sortpos - 1
        upper_ix = sortpos
    elif dt == sortval:
        lower_ix = sortpos - 1
        upper_ix = sortpos + 1
    else:
        lower_ix = sortpos
        upper_ix = sortpos + 1

    lower_value = dts[lower_ix] if lower_ix >= 0 else None
    upper_value = dts[upper_ix] if upper_ix < len(dts) else None

    return lower_value, upper_value
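A few illustrative calls:

import pandas as pd

dts = pd.to_datetime(['2014-01-02', '2014-01-06', '2014-01-09'])

# dt is an element: return its unequal neighbors on both sides.
nearest_unequal_elements(dts, pd.Timestamp('2014-01-06'))
# -> (Timestamp('2014-01-02'), Timestamp('2014-01-09'))

# dt is before every element: no last_before.
nearest_unequal_elements(dts, pd.Timestamp('2014-01-01'))
# -> (None, Timestamp('2014-01-02'))

# dt is after every element: no first_after.
nearest_unequal_elements(dts, pd.Timestamp('2014-01-10'))
# -> (Timestamp('2014-01-09'), None)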
Convert a pd.Timedelta to a number of seconds as an int.
def timedelta_to_integral_seconds(delta): """ Convert a pd.Timedelta to a number of seconds as an int. """ return int(delta.total_seconds())
Convert a pd.Timedelta to a number of minutes as an int.
def timedelta_to_integral_minutes(delta): """ Convert a pd.Timedelta to a number of minutes as an int. """ return timedelta_to_integral_seconds(delta) // 60
Clear cached attributes from a pandas DataFrame. By default pandas memoizes indexers (`iloc`, `loc`, `ix`, etc.) objects on DataFrames, resulting in refcycles that can lead to unexpectedly long-lived DataFrames. This function attempts to clear those cycles by deleting the cached indexers from the frame. Parameters ---------- df : pd.DataFrame
def clear_dataframe_indexer_caches(df): """ Clear cached attributes from a pandas DataFrame. By default pandas memoizes indexers (`iloc`, `loc`, `ix`, etc.) objects on DataFrames, resulting in refcycles that can lead to unexpectedly long-lived DataFrames. This function attempts to clear those cycles by deleting the cached indexers from the frame. Parameters ---------- df : pd.DataFrame """ for attr in _INDEXER_NAMES: try: delattr(df, attr) except AttributeError: pass
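``_INDEXER_NAMES`` is not shown in this excerpt; a plausible definition is the underscore-prefixed cache attribute for each pandas indexer:

# Plausible definition: pandas caches each indexer under an
# underscore-prefixed attribute on the object.
_INDEXER_NAMES = ['_' + name for name in ('iloc', 'loc', 'ix', 'at', 'iat')]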