response | instruction |
---|---|
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray. | def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
) |
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`. | def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
) |
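A brief usage sketch of the check above; it assumes the two exception classes are importable from zipline.errors, which is where zipline defines them:
import numpy as np
from zipline.errors import WindowLengthNotPositive, WindowLengthTooLong

data = np.zeros((5, 3))        # 5 rows of data for 3 assets
_check_window_params(data, 3)  # fine: 1 <= 3 <= 5, returns None
try:
    _check_window_params(data, 0)  # rejected: window must be at least 1
except WindowLengthNotPositive:
    pass
try:
    _check_window_params(data, 6)  # rejected: only 5 rows are available
except WindowLengthTooLong:
    pass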
Eq check with a short-circuit for identical objects. | def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return (
left is right
or ((left.shape == right.shape) and (left == right).all())
) |
LabelArray-aware implementation of np.where.
| def labelarray_where(cond, trues, falses):
"""LabelArray-aware implementation of np.where.
"""
if trues.missing_value != falses.missing_value:
raise ValueError(
"Can't compute where on arrays with different missing values."
)
strs = np.where(cond, trues.as_string_array(), falses.as_string_array())
return LabelArray(strs, missing_value=trues.missing_value) |
Simple implementation of grouped row-wise function application.
Parameters
----------
data : ndarray[ndim=2]
Input array over which to apply a grouped function.
group_labels : ndarray[ndim=2, dtype=int64]
Labels to use to bucket inputs from array.
Should be the same shape as array.
func : function[ndarray[ndim=1]] -> ndarray[ndim=1]
Function to apply to pieces of each row in array.
func_args : tuple
Additional positional arguments to pass to ``func``.
out : ndarray, optional
Array into which to write output. If not supplied, a new array of the
same shape as ``data`` is allocated and returned.
Examples
--------
>>> data = np.array([[1., 2., 3.],
... [2., 3., 4.],
... [5., 6., 7.]])
>>> labels = np.array([[0, 0, 1],
... [0, 1, 0],
... [1, 0, 2]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
array([[ 0., 1., 0.],
[ 0., 0., 2.],
[ 0., 0., 0.]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
array([[ 0.33333333, 0.66666667, 1. ],
[ 0.33333333, 1. , 0.66666667],
[ 1. , 1. , 1. ]]) | def naive_grouped_rowwise_apply(data,
group_labels,
func,
func_args=(),
out=None):
"""
Simple implementation of grouped row-wise function application.
Parameters
----------
data : ndarray[ndim=2]
Input array over which to apply a grouped function.
group_labels : ndarray[ndim=2, dtype=int64]
Labels to use to bucket inputs from array.
Should be the same shape as array.
func : function[ndarray[ndim=1]] -> ndarray[ndim=1]
Function to apply to pieces of each row in array.
func_args : tuple
Additional positional arguments to pass to ``func``.
out : ndarray, optional
Array into which to write output. If not supplied, a new array of the
same shape as ``data`` is allocated and returned.
Examples
--------
>>> data = np.array([[1., 2., 3.],
... [2., 3., 4.],
... [5., 6., 7.]])
>>> labels = np.array([[0, 0, 1],
... [0, 1, 0],
... [1, 0, 2]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
array([[ 0., 1., 0.],
[ 0., 0., 2.],
[ 0., 0., 0.]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
array([[ 0.33333333, 0.66666667, 1. ],
[ 0.33333333, 1. , 0.66666667],
[ 1. , 1. , 1. ]])
"""
if out is None:
out = np.empty_like(data)
for (row, label_row, out_row) in zip(data, group_labels, out):
for label in np.unique(label_row):
locs = (label_row == label)
out_row[locs] = func(row[locs], *func_args)
return out |
Compute rowwise array quantiles on an input. | def quantiles(data, nbins_or_partition_bounds):
"""
Compute rowwise array quantiles on an input.
"""
return apply_along_axis(
qcut,
1,
data,
q=nbins_or_partition_bounds, labels=False,
) |
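A small worked example, assuming (as the call suggests) that ``qcut`` is ``pandas.qcut`` and ``apply_along_axis`` is ``numpy.apply_along_axis``:
import numpy as np

data = np.array([[1., 2., 3., 4.],
                 [40., 10., 30., 20.]])
# Two quantile bins per row; qcut(labels=False) returns each entry's bin index.
quantiles(data, 2)
# array([[0, 0, 1, 1],
#        [1, 0, 1, 0]])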
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...) | def restrict_to_dtype(dtype, message_template):
"""
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...)
"""
def processor(term_method, _, term_instance):
term_dtype = term_instance.dtype
if term_dtype != dtype:
raise TypeError(
message_template.format(
method_name=term_method.__name__,
expected_dtype=dtype.name,
received_dtype=term_dtype,
)
)
return term_instance
return preprocess(self=processor) |
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms. | def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr)) |
Choose entries from ``dates`` to use for downsampling at ``frequency``.
Parameters
----------
dates : pd.DatetimeIndex
Dates from which to select sample choices.
{frequency}
Returns
-------
indices : np.array[int64]
An array containing indices of dates on which samples should be taken.
The resulting index will always include 0 as a sample index, and it
will include the first date of each subsequent year/quarter/month/week,
as determined by ``frequency``.
Notes
-----
This function assumes that ``dates`` does not have large gaps.
In particular, it assumes that the maximum distance between any two entries
in ``dates`` is never greater than a year, which we rely on because we use
``np.diff(dates.<frequency>)`` to find dates where the sampling
period has changed. | def select_sampling_indices(dates, frequency):
"""
Choose entries from ``dates`` to use for downsampling at ``frequency``.
Parameters
----------
dates : pd.DatetimeIndex
Dates from which to select sample choices.
{frequency}
Returns
-------
indices : np.array[int64]
An array containing indices of dates on which samples should be taken.
The resulting index will always include 0 as a sample index, and it
will include the first date of each subsequent year/quarter/month/week,
as determined by ``frequency``.
Notes
-----
This function assumes that ``dates`` does not have large gaps.
In particular, it assumes that the maximum distance between any two entries
in ``dates`` is never greater than a year, which we rely on because we use
``np.diff(dates.<frequency>)`` to find dates where the sampling
period has changed.
"""
return changed_locations(
_dt_to_period[frequency](dates),
include_first=True
) |
The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with. | def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace |
Create a MultiIndex for a pipeline output.
Parameters
----------
dates : pd.DatetimeIndex
Row labels for ``mask``.
assets : pd.Index
Column labels for ``mask``.
mask : np.ndarray[bool]
Mask array indicating date/asset pairs that should be included in
output index.
Returns
-------
index : pd.MultiIndex
MultiIndex containing (date, asset) pairs corresponding to ``True``
values in ``mask``. | def _pipeline_output_index(dates, assets, mask):
"""
Create a MultiIndex for a pipeline output.
Parameters
----------
dates : pd.DatetimeIndex
Row labels for ``mask``.
assets : pd.Index
Column labels for ``mask``.
mask : np.ndarray[bool]
Mask array indicating date/asset pairs that should be included in
output index.
Returns
-------
index : pd.MultiIndex
MultiIndex containing (date, asset) pairs corresponding to ``True``
values in ``mask``.
"""
date_labels = repeat_last_axis(arange(len(dates)), len(assets))[mask]
asset_labels = repeat_first_axis(arange(len(assets)), len(dates))[mask]
return MultiIndex(
levels=[dates, assets],
labels=[date_labels, asset_labels],
# TODO: We should probably add names for these.
names=[None, None],
verify_integrity=False,
) |
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple. | def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup) |
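A quick illustration of both branches:
_ensure_element((1, 2, 3), 2)   # ((1, 2, 3), 1)     -- elem already present
_ensure_element((1, 2, 3), 9)   # ((1, 2, 3, 9), 3)  -- elem appended at the end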
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__' | def method_name_for_op(op, commute=False):
"""
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__'
"""
if commute:
return ops_to_commuted_methods[op]
return ops_to_methods[op] |
Specialize a term if it's loadable.
| def maybe_specialize(term, domain):
"""Specialize a term if it's loadable.
"""
if isinstance(term, LoadableTerm):
return term.specialize(domain)
return term |
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified. | def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
_coerce_to_dtype(missing_value, dtype)
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value |
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term. | def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"String-dtype classifiers can only produce strings or None."
.format(types=' or '.join([t.__name__ for t in label_types]))
) |
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"' | def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]]) |
Get nodes from graph G with indegree 0 | def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0) |
Draw `g` as a graph to `out`, in format `format_`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph. | def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format_`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout) |
Display a TermGraph interactively from within IPython. | def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
else:
raise ValueError("Unknown display format %r; expected 'svg', 'jpeg', or 'png'." % format)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue()) |
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]' | def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
return '[' + ', '.join(entries) + ']' |
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
[decay_rate ** length, ..., decay_rate ** 2, decay_rate]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64] | def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
[decay_rate ** length, ..., decay_rate ** 2, decay_rate]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1) |
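A quick check of the weights the expression above produces for a short window (the exponents run from ``length + 1`` down to ``2``):
exponential_weights(3, 0.5)
# array([ 0.0625,  0.125 ,  0.25  ])   # == [0.5 ** 4, 0.5 ** 3, 0.5 ** 2]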
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison. | def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method |
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`. | def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype |
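A usage sketch, assuming the dtype constants come from ``zipline.utils.numpy_utils`` and that the module-level ``is_comparison`` helper is in scope:
from zipline.utils.numpy_utils import bool_dtype, float64_dtype, int64_dtype

binop_return_dtype('+', float64_dtype, float64_dtype)  # float64: arithmetic result is a Factor
binop_return_dtype('<', float64_dtype, float64_dtype)  # bool: comparisons produce Filters
try:
    binop_return_dtype('+', float64_dtype, int64_dtype)  # arithmetic needs float64 on both sides
except TypeError:
    pass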
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__. | def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
is_compare = is_comparison(op)
if is_compare:
ret_doc = BINOP_RETURN_FILTER.format(op=op)
rtype = 'Filter'
else:
ret_doc = BINOP_RETURN_FACTOR.format(op=op)
rtype = 'Factor'
docstring = BINOP_DOCSTRING_TEMPLATE.format(
op=op,
ret=ret_doc,
rtype=rtype,
)
@with_doc(docstring)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = NumExprFilter if is_compare else NumExprFactor
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator |
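For context, a factory like this is normally consumed in a class body by mapping operator symbols to the generated methods. The sketch below mirrors that pattern; the class name and operator set are illustrative rather than the exact zipline constants:
class ArithmeticMixin(object):
    # Attach __add__, __sub__, __lt__, ... produced by the factory above.
    # locals() inside a class body is the class namespace (CPython), so the
    # generated functions become ordinary methods of the class.
    locals().update(
        {
            method_name_for_op(op): binary_operator(op)
            for op in {'+', '-', '*', '/', '<', '<=', '>', '>='}
        }
    )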
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__. | def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator |
Factory function for making unary operator methods for Factors. | def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator |
Factory function for producing function application methods for Factor
subclasses. | def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
docstring = dedent(
"""\
Construct a Factor that computes ``{}()`` on each output of ``self``.
Returns
-------
factor : zipline.pipeline.Factor
""".format(func)
)
@with_doc(docstring)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc |
This implementation is based on scipy.stats.mstats.winsorize | def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a |
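A worked example on a single row (percentiles are fractions in [0, 1]; the module-level ``isnan`` and ``ceil`` the function relies on are assumed to be in scope):
import numpy as np

row = np.array([1., 2., 3., 4., 100.])
winsorize(row, min_percentile=0.0, max_percentile=0.8)
# array([ 1.,  2.,  3.,  4.,  4.])   # top 20% clamped to the value at the cutoff
winsorize(row, min_percentile=0.2, max_percentile=0.8)
# array([ 2.,  2.,  3.,  4.,  4.])   # bottom 20% clamped as well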
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in either ``dependents`` or
``independent`` will output NaN as the regression coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``. | def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in either ``dependents`` or
``independent`` will output NaN as the regression coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
# product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
# Write nans back to locations where we have more than the allowed number of
# missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out |
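A usage sketch with data whose slopes are known in advance (``nanmean`` here is assumed to be ``numpy.nanmean``, which is what the function's calls imply):
import numpy as np

independent = np.arange(10.).reshape(10, 1)        # shape (N, 1)
dependents = np.hstack([2.0 * independent,         # column with slope  2.0
                        -0.5 * independent])       # column with slope -0.5
vectorized_beta(dependents, independent, allowed_missing=0)
# array([ 2. , -0.5])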
Compute Pearson's r between columns of ``dependents`` and ``independents``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independents : np.array[N, M] or np.array[N, 1]
Independent variable(s) of the regression. If a single column is
passed, it is broadcast to the shape of ``dependents``.
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in either ``dependents`` or
``independents`` will output NaN as the correlation coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
correlations : np.array[M]
Pearson correlation coefficients for each column of ``dependents``.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearson`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns` | def vectorized_pearson_r(dependents, independents, allowed_missing, out=None):
"""
Compute Pearson's r between columns of ``dependents`` and ``independents``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independents : np.array[N, M] or np.array[N, 1]
Independent variable(s) of the regression. If a single column is
passed, it is broadcast to the shape of ``dependents``.
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in either ``dependents`` or
``independents`` will output NaN as the correlation coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
correlations : np.array[M]
Pearson correlation coefficients for each column of ``dependents``.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearson`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
"""
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
if allowed_missing > 0:
# If we're handling nans robustly, we need to mask both arrays to
# locations where either was nan.
either_nan = isnan(dependents) | isnan(independents)
independents = np.where(either_nan, nan, independents)
dependents = np.where(either_nan, nan, dependents)
mean = nanmean
else:
# Otherwise, we can just use mean, which will give us a nan for any
# column where there's ever a nan.
mean = np.mean
# Pearson R is Cov(X, Y) / (StdDev(X) * StdDev(Y))
# c.f. https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
ind_residual = independents - mean(independents, axis=0)
dep_residual = dependents - mean(dependents, axis=0)
ind_variance = mean(ind_residual ** 2, axis=0)
dep_variance = mean(dep_residual ** 2, axis=0)
covariances = mean(ind_residual * dep_residual, axis=0)
evaluate(
'where(mask, nan, cov / sqrt(ind_variance * dep_variance))',
local_dict={'cov': covariances,
'mask': isnan(independents).sum(axis=0) > allowed_missing,
'nan': np.nan,
'ind_variance': ind_variance,
'dep_variance': dep_variance},
global_dict={},
out=out,
)
return out |
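A similar sketch for the correlation helper (``evaluate`` is assumed to be ``numexpr.evaluate``); a perfectly linear relationship gives r = ±1:
import numpy as np

independents = np.arange(10.).reshape(10, 1)
dependents = np.hstack([3.0 * independents + 1.0,  # perfectly correlated column
                        -2.0 * independents])      # perfectly anti-correlated column
vectorized_pearson_r(dependents, independents, allowed_missing=0)
# array([ 1., -1.])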
Concatenate a sequence of tuples into one tuple. | def concat_tuples(*tuples):
"""
Concatenate a sequence of tuples into one tuple.
"""
return tuple(chain(*tuples)) |
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__. | def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator |
Factory function for making unary operator methods for Filters. | def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator |
Factory function for making DelegatingHooks methods.
| def delegating_hooks_method(method_name):
"""Factory function for making DelegatingHooks methods.
"""
if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS:
# Generate a contextmanager that enters the context of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
@contextmanager
def ctx(self, *args, **kwargs):
with ExitStack() as stack:
for hook in self._hooks:
sub_ctx = getattr(hook, method_name)(*args, **kwargs)
stack.enter_context(sub_ctx)
yield stack
return ctx
else:
# Generate a method that calls methods of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
def method(self, *args, **kwargs):
for hook in self._hooks:
sub_method = getattr(hook, method_name)
sub_method(*args, **kwargs)
return method |
Wrapper for contextlib.contextmanager that tracks which methods of
PipelineHooks are contextmanagers in PIPELINE_HOOKS_CONTEXT_MANAGERS. | def contextmanager(f):
"""
Wrapper for contextlib.contextmanager that tracks which methods of
PipelineHooks are contextmanagers in PIPELINE_HOOKS_CONTEXT_MANAGERS.
"""
PIPELINE_HOOKS_CONTEXT_MANAGERS.add(f.__name__)
return _contextmanager(f) |
Repr a value and html-escape the result.
If an error is thrown by the repr, show a placeholder. | def repr_htmlsafe(t):
"""Repr a value and html-escape the result.
If an error is thrown by the repr, show a placeholder.
"""
try:
r = repr(t)
except Exception:
r = "(Error Displaying {})".format(type(t).__name__)
return escape_html(str(r), quote=True) |
Factory function for making testing methods.
| def testing_hooks_method(method_name):
"""Factory function for making testing methods.
"""
if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS:
# Generate a method that enters the context of all sub-hooks.
@wraps(getattr(PipelineHooks, method_name))
@contextmanager
def ctx(self, *args, **kwargs):
call = Call(method_name, args, kwargs)
self.trace.append(ContextCall('enter', call))
yield
self.trace.append(ContextCall('exit', call))
return ctx
else:
# Generate a method that calls methods of all sub-hooks.
@wraps(getattr(PipelineHooks, method_name))
def method(self, *args, **kwargs):
self.trace.append(Call(method_name, args, kwargs))
return method |
Compute the set of resource columns required to serve
`columns`. | def required_estimates_fields(columns):
"""
Compute the set of resource columns required to serve
`columns`.
"""
# We also expect any of the field names that our loadable columns
# are mapped to.
return metadata_columns.union(viewvalues(columns)) |
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`. | def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) |
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``. | def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
"""
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
) |
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``. | def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
"""
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) |
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (1,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : pd.DatetimeIndex
The trading calendar to use.
holes : dict[int -> tuple[pd.Timestamps]], optional
A dict mapping asset ids to the tuple of dates that should have
no data for that asset in the output. Default is no holes.
Yields
------
p : (int, pd.DataFrame)
A sid, data pair to be passed to BcolzDailyBarWriter.write | def make_bar_data(asset_info, calendar, holes=None):
"""
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (1,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : pd.DatetimeIndex
The trading calendar to use.
holes : dict[int -> tuple[pd.Timestamps]], optional
A dict mapping asset ids to the tuple of dates that should have
no data for that asset in the output. Default is no holes.
Yields
------
p : (int, pd.DataFrame)
A sid, data pair to be passed to BcolzDailyBarWriter.write
"""
assert (
# Using .value here to avoid having to care about UTC-aware dates.
PSEUDO_EPOCH.value <
calendar.normalize().min().value <=
asset_info['start_date'].min().value
), "calendar.min(): %s\nasset_info['start_date'].min(): %s" % (
calendar.min(),
asset_info['start_date'].min(),
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
def _raw_data_for_asset(asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
datetimes = calendar[calendar.slice_indexer(
asset_start(asset_info, asset_id),
asset_end(asset_info, asset_id),
)]
data = full(
(len(datetimes), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * 100 * 1000,
dtype=uint32,
)
# Add 1,000 * column-index to OHLCV columns
data[:, :5] += arange(5, dtype=uint32) * 1000
# Add days since Jan 1 2000 (the pseudo-epoch) to OHLCV columns.
data[:, :5] += (datetimes - PSEUDO_EPOCH).days[:, None].astype(uint32)
frame = DataFrame(
data,
index=datetimes,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
if holes is not None and asset_id in holes:
for dt in holes[asset_id]:
frame.loc[dt, OHLC] = nan
frame.loc[dt, ['volume']] = 0
frame['day'] = nanos_to_seconds(datetimes.asi8)
frame['id'] = asset_id
return frame
for asset in asset_info.index:
yield asset, _raw_data_for_asset(asset) |
Compute the expected raw value for an asset/date/column triple.
Used by tests to verify data written by a writer. | def expected_bar_value(asset_id, date, colname):
"""
Compute the expected raw value for an asset/date/column triple.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date |
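A worked example of the encoding, assuming ``PSEUDO_EPOCH`` is Jan 1, 2000 (UTC) and ``OHLCV`` is ``('open', 'high', 'low', 'close', 'volume')``, as the ``make_bar_data`` docstring above implies:
import pandas as pd

# Asset 3, the 'high' column (index 1), 10 days after the pseudo-epoch:
expected_bar_value(3, pd.Timestamp('2000-01-11', tz='UTC'), 'high')
# 3 * 100000 + 1 * 1000 + 10 == 301010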
Return a 2D array containing expected_bar_value(asset_id, date, colname)
for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`. | def expected_bar_values_2d(dates,
assets,
asset_info,
colname,
holes=None):
"""
Return a 2D array containing expected_bar_value(asset_id, date, colname)
for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
if holes is not None:
expected = expected_bar_value_with_holes(
asset,
date,
colname,
holes,
missing,
)
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected
return data |
Make a PipelineLoader that emits np.eye arrays for the columns in
``TestingDataSet``. | def make_eye_loader(dates, sids):
"""
Make a PipelineLoader that emits np.eye arrays for the columns in
``TestingDataSet``.
"""
return EyeLoader(TestingDataSet.columns, dates, sids) |
Make a PipelineLoader that emits random arrays seeded with `seed` for the
columns in ``TestingDataSet``. | def make_seeded_random_loader(seed,
dates,
sids,
columns=TestingDataSet.columns):
"""
Make a PipelineLoader that emits random arrays seeded with `seed` for the
columns in ``TestingDataSet``.
"""
return SeededRandomLoader(seed, columns, dates, sids) |
Check if a numpy array is sorted. | def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all() |
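A quick illustration:
import numpy as np

is_sorted_ascending(np.array([1, 2, 2, 5]))  # True  (non-decreasing)
is_sorted_ascending(np.array([1, 3, 2]))     # False (3 > 2 breaks the ordering)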
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``. | def next_event_indexer(all_dates,
data_query_cutoff,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out |
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
The boundaries for the given trading sessions; these are the row labels
for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``. | def previous_event_indexer(data_query_cutoff_times,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
The boundaries for the given trading sessions; these are the row labels
for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out |
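A minimal usage sketch for ``previous_event_indexer`` with made-up inputs; in a
real script the function would be imported from this module (e.g.
``zipline.pipeline.loaders.utils``):
import numpy as np
import pandas as pd
dts = pd.date_range('2014-01-01', periods=4)    # hypothetical query cutoffs
sids = np.array([1, 2])
event_dates = np.array(['2014-01-02', '2014-01-03'], dtype='datetime64[ns]')
event_timestamps = event_dates                  # learned the same day
event_sids = np.array([1, 2])
ixs = previous_event_indexer(dts, sids, event_dates,
                             event_timestamps, event_sids)
# Rows before an event's effective date hold -1 (no previous event known);
# later rows hold the index of the most recent event for that sid.
values = np.array([10.0, 20.0])
out = np.where(ixs == -1, np.nan, values[ixs])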
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns. | def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group |
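A hedged sketch of ``last_in_date_group`` on a tiny frame; the column names
assume the conventional ``TS_FIELD_NAME == 'timestamp'`` and
``SID_FIELD_NAME == 'sid'``:
import pandas as pd
cutoffs = pd.date_range('2014-01-01', periods=3)
df = pd.DataFrame({
    'timestamp': pd.to_datetime(['2014-01-01', '2014-01-01', '2014-01-02']),
    'sid': [1, 1, 2],
    'value': [1.0, 2.0, 3.0],   # the 2.0 row wins for (2014-01-01, sid 1)
})
assets = pd.Int64Index([1, 2])
result = last_in_date_group(df, cutoffs, assets)
# ``result`` is indexed by the cutoff dates with a (field, sid) column
# MultiIndex; (date, sid) combinations with no data are NaN.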
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`. | def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column.name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype) |
Shift dates of a pipeline query back by ``shift`` days.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
Returns
-------
shifted : pd.DatetimeIndex
The range [start_date, end_date] from ``dates``, shifted backwards by
``shift`` days.
Raises
------
ValueError
If ``start_date`` or ``end_date`` is not in ``dates``.
NoFurtherDataError
If shifting ``start_date`` back by ``shift`` days would push it off the
end of ``dates``. | def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by ``shift`` days.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
Returns
-------
shifted : pd.DatetimeIndex
The range [start_date, end_date] from ``dates``, shifted backwards by
``shift`` days.
Raises
------
ValueError
If ``start_date`` or ``end_date`` is not in ``dates``.
NoFurtherDataError
If shifting ``start_date`` back by ``shift`` days would push it off the
end of ``dates``.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift:end - shift + 1] |
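A small illustration of ``shift_dates`` on a toy calendar (the dates are made
up; real callers pass trading sessions):
import pandas as pd
dates = pd.date_range('2014-01-01', '2014-01-10')
shifted = shift_dates(dates,
                      start_date=pd.Timestamp('2014-01-05'),
                      end_date=pd.Timestamp('2014-01-07'),
                      shift=2)
# ``shifted`` covers 2014-01-03 through 2014-01-05: the [start, end] window
# moved back by two entries of ``dates``.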
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
type_ : np.dtype
The numpy dtype. | def datashape_type_to_numpy(type_):
"""
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
    type_ : np.dtype
The numpy dtype.
"""
if isinstance(type_, Option):
type_ = type_.ty
if isinstance(type_, DateTime):
return np.dtype('datetime64[ns]')
if isinstance(type_, String):
return np.dtype(object)
if type_ in integral:
return np.dtype('int64')
else:
return type_.to_numpy_dtype() |
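A quick check of ``datashape_type_to_numpy`` over a record measure (assumes the
optional ``datashape`` dependency is installed):
from datashape import dshape
measure = dshape('{asof_date: datetime, sid: int64, value: ?float64}').measure
for name, type_ in measure.fields:
    print(name, datashape_type_to_numpy(type_))
# asof_date datetime64[ns]
# sid int64
# value float64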
Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
missing_values : frozenset of (name, value) pairs
Association pairs column name and missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
Returns
-------
ds : type
A new dataset type.
Notes
-----
This function is memoized. Repeated calls with the same inputs will return
the same type. | def new_dataset(expr, missing_values, domain):
"""
Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
    missing_values : frozenset of (name, value) pairs
Association pairs column name and missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
Returns
-------
ds : type
A new dataset type.
Notes
-----
    This function is memoized. Repeated calls with the same inputs will return
the same type.
"""
missing_values = dict(missing_values)
class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1}
for name, type_ in expr.dshape.measure.fields:
# Don't generate a column for sid or timestamp, since they're
        # implicitly the labels of the arrays that will be passed to pipeline
# Terms.
if name in (SID_FIELD_NAME, TS_FIELD_NAME):
continue
type_ = datashape_type_to_numpy(type_)
if can_represent_dtype(type_):
col = Column(
type_,
missing_values.get(name, NotSpecified),
)
else:
col = NonPipelineField(name, type_)
class_dict[name] = col
if 'domain' in class_dict:
raise ValueError("Got a column named 'domain' in new_dataset(). "
"'domain' is reserved.")
class_dict['domain'] = domain
name = expr._name
if name is None:
name = next(_new_names)
# unicode is a name error in py3 but the branch is only hit
# when we are in python 2.
if PY2 and isinstance(name, unicode): # pragma: no cover # noqa
name = name.encode('utf-8')
return type(name, (DataSet,), class_dict) |
Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression. | def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
) |
Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``. | def _check_datetime_field(name, measure):
"""Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``.
"""
if not isinstance(measure[name], (Date, DateTime)):
raise TypeError(
"'{name}' field must be a '{dt}', not: '{dshape}'".format(
name=name,
dt=DateTime(),
dshape=measure[name],
),
) |
Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use. | def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
"""Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
"""
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
return metadata_expr
try:
# The error produced by expr[field_name] when field_name doesn't exist
# is very expensive. Avoid that cost by doing the check ourselves.
field_name = '_'.join(((expr._name or ''), field))
child = expr._child
if field_name not in child.fields:
raise AttributeError(field_name)
return child[field_name]
except (ValueError, AttributeError):
if no_metadata_rule == 'raise':
raise ValueError(
"no %s table could be reflected for %s" % (field, expr)
)
elif no_metadata_rule == 'warn':
warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
return None |
Duplicate the asof_date column as the timestamp column.
Parameters
----------
expr : Expr or None
The expression to change the columns of.
Returns
-------
transformed : Expr or None
The transformed expression or None if ``expr`` is None. | def _ad_as_ts(expr):
"""Duplicate the asof_date column as the timestamp column.
Parameters
----------
expr : Expr or None
The expression to change the columns of.
Returns
-------
transformed : Expr or None
The transformed expression or None if ``expr`` is None.
"""
return (
None
if expr is None else
bz.transform(expr, **{TS_FIELD_NAME: expr[AD_FIELD_NAME]})
) |
Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
dataset_expr, deltas, checkpoints : Expr
    The new baseline, deltas, and checkpoints expressions to use. | def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
"""Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
    dataset_expr, deltas, checkpoints : Expr
        The new baseline, deltas, and checkpoints expressions to use.
"""
measure = dataset_expr.dshape.measure
if TS_FIELD_NAME not in measure.names:
dataset_expr = bz.transform(
dataset_expr,
**{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]}
)
deltas = _ad_as_ts(deltas)
checkpoints = _ad_as_ts(checkpoints)
else:
_check_datetime_field(TS_FIELD_NAME, measure)
return dataset_expr, deltas, checkpoints |
Create a Pipeline API object from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression to use.
deltas : Expr, 'auto' or None, optional
The expression to use for the point in time adjustments.
If the string 'auto' is passed, a deltas expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_deltas'. If None is passed, no
deltas will be used.
checkpoints : Expr, 'auto' or None, optional
The expression to use for the forward fill checkpoints.
If the string 'auto' is passed, a checkpoints expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_checkpoints'. If None is passed,
no checkpoints will be used.
loader : BlazeLoader, optional
The blaze loader to attach this pipeline dataset to. If None is passed,
the global blaze loader is used.
resources : dict or any, optional
The data to execute the blaze expressions against. This is used as the
scope for ``bz.compute``.
odo_kwargs : dict, optional
The keyword arguments to pass to odo when evaluating the expressions.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
missing_values : dict[str -> any], optional
A dict mapping column names to missing values for those columns.
Missing values are required for integral columns.
no_deltas_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``deltas='auto'`` but no deltas can be found.
'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
no_checkpoints_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``checkpoints='auto'`` but no checkpoints can be
found. 'warn' says to raise a warning but continue.
    'raise' says to raise an exception if no checkpoints can be found.
    'ignore' says take no action and proceed with no checkpoints.
Returns
-------
pipeline_api_obj : DataSet or BoundColumn
Either a new dataset or bound column based on the shape of the expr
passed in. If a table shaped expression is passed, this will return
a ``DataSet`` that represents the whole table. If an array-like shape
is passed, a ``BoundColumn`` on the dataset that would be constructed
from passing the parent is returned. | def from_blaze(expr,
deltas='auto',
checkpoints='auto',
loader=None,
resources=None,
odo_kwargs=None,
missing_values=None,
domain=GENERIC,
no_deltas_rule='warn',
no_checkpoints_rule='warn'):
"""Create a Pipeline API object from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression to use.
deltas : Expr, 'auto' or None, optional
The expression to use for the point in time adjustments.
If the string 'auto' is passed, a deltas expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_deltas'. If None is passed, no
deltas will be used.
checkpoints : Expr, 'auto' or None, optional
The expression to use for the forward fill checkpoints.
If the string 'auto' is passed, a checkpoints expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_checkpoints'. If None is passed,
no checkpoints will be used.
loader : BlazeLoader, optional
The blaze loader to attach this pipeline dataset to. If None is passed,
the global blaze loader is used.
resources : dict or any, optional
The data to execute the blaze expressions against. This is used as the
scope for ``bz.compute``.
odo_kwargs : dict, optional
The keyword arguments to pass to odo when evaluating the expressions.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
missing_values : dict[str -> any], optional
A dict mapping column names to missing values for those columns.
Missing values are required for integral columns.
no_deltas_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``deltas='auto'`` but no deltas can be found.
'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
no_checkpoints_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``checkpoints='auto'`` but no checkpoints can be
found. 'warn' says to raise a warning but continue.
        'raise' says to raise an exception if no checkpoints can be found.
        'ignore' says take no action and proceed with no checkpoints.
Returns
-------
pipeline_api_obj : DataSet or BoundColumn
Either a new dataset or bound column based on the shape of the expr
passed in. If a table shaped expression is passed, this will return
a ``DataSet`` that represents the whole table. If an array-like shape
is passed, a ``BoundColumn`` on the dataset that would be constructed
from passing the parent is returned.
"""
if 'auto' in {deltas, checkpoints}:
invalid_nodes = tuple(filter(is_invalid_deltas_node, expr._subterms()))
if invalid_nodes:
raise TypeError(
'expression with auto %s may only contain (%s) nodes,'
" found: %s" % (
' or '.join(
['deltas'] if deltas is not None else [] +
['checkpoints'] if checkpoints is not None else [],
),
', '.join(map(get__name__, valid_deltas_node_types)),
', '.join(
set(map(compose(get__name__, type), invalid_nodes)),
),
),
)
deltas = _get_metadata(
'deltas',
expr,
deltas,
no_deltas_rule,
)
checkpoints = _get_metadata(
'checkpoints',
expr,
checkpoints,
no_checkpoints_rule,
)
# Check if this is a single column out of a dataset.
if bz.ndim(expr) != 1:
raise TypeError(
'expression was not tabular or array-like,'
' %s dimensions: %d' % (
'too many' if bz.ndim(expr) > 1 else 'not enough',
bz.ndim(expr),
),
)
single_column = None
if isscalar(expr.dshape.measure):
# This is a single column. Record which column we are to return
# but create the entire dataset.
single_column = rename = expr._name
field_hit = False
if not isinstance(expr, traversable_nodes):
raise TypeError(
"expression '%s' was array-like but not a simple field of"
" some larger table" % str(expr),
)
while isinstance(expr, traversable_nodes):
if isinstance(expr, bz.expr.Field):
if not field_hit:
field_hit = True
else:
break
rename = expr._name
expr = expr._child
dataset_expr = expr.relabel({rename: single_column})
else:
dataset_expr = expr
measure = dataset_expr.dshape.measure
if not isrecord(measure) or AD_FIELD_NAME not in measure.names:
raise TypeError(
"The dataset must be a collection of records with at least an"
" '{ad}' field. Fields provided: '{fields}'\nhint: maybe you need"
" to use `relabel` to change your field names".format(
ad=AD_FIELD_NAME,
fields=measure,
),
)
_check_datetime_field(AD_FIELD_NAME, measure)
dataset_expr, deltas, checkpoints = _ensure_timestamp_field(
dataset_expr,
deltas,
checkpoints,
)
if deltas is not None and (sorted(deltas.dshape.measure.fields) !=
sorted(measure.fields)):
raise TypeError(
'baseline measure != deltas measure:\n%s != %s' % (
measure,
deltas.dshape.measure,
),
)
if (checkpoints is not None and
(sorted(checkpoints.dshape.measure.fields) !=
sorted(measure.fields))):
raise TypeError(
'baseline measure != checkpoints measure:\n%s != %s' % (
measure,
checkpoints.dshape.measure,
),
)
# Ensure that we have a data resource to execute the query against.
_check_resources('expr', dataset_expr, resources)
_check_resources('deltas', deltas, resources)
_check_resources('checkpoints', checkpoints, resources)
# Create or retrieve the Pipeline API dataset.
if missing_values is None:
missing_values = {}
ds = new_dataset(dataset_expr, frozenset(missing_values.items()), domain)
# Register our new dataset with the loader.
(loader if loader is not None else global_loader).register_dataset(
ds,
bind_expression_to_resources(dataset_expr, resources),
bind_expression_to_resources(deltas, resources)
if deltas is not None else
None,
bind_expression_to_resources(checkpoints, resources)
if checkpoints is not None else
None,
odo_kwargs=odo_kwargs,
)
if single_column is not None:
# We were passed a single column, extract and return it.
return getattr(ds, single_column)
return ds |
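A hedged end-to-end sketch for ``from_blaze``: a tiny in-memory table with the
required ``asof_date``/``timestamp``/``sid`` fields becomes a pipeline
``DataSet``. The frame, dataset name, and dshape string are invented for
illustration, and the optional blaze stack must be installed:
import blaze as bz
import pandas as pd
raw = pd.DataFrame({
    'sid': [1, 1, 2],
    'asof_date': pd.to_datetime(['2014-01-01', '2014-01-02', '2014-01-01']),
    'timestamp': pd.to_datetime(['2014-01-01', '2014-01-02', '2014-01-01']),
    'value': [1.0, 2.0, 3.0],
}, columns=['sid', 'asof_date', 'timestamp', 'value'])
expr = bz.data(
    raw,
    name='my_dataset',
    dshape='var * {sid: int64, asof_date: datetime, timestamp: datetime,'
           ' value: float64}',
)
MyDataSet = from_blaze(expr, deltas=None, checkpoints=None)
# ``MyDataSet.value`` is a BoundColumn usable in a Pipeline; passing
# ``expr.value`` instead would return the BoundColumn directly.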
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources. | def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
}) |
Computes a lower bound and a DataFrame checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``. | def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs):
"""
Computes a lower bound and a DataFrame checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
"""
if checkpoints is not None:
ts = checkpoints[TS_FIELD_NAME]
checkpoints_ts = odo(
ts[ts < lower_dt].max(),
pd.Timestamp,
**odo_kwargs
)
if pd.isnull(checkpoints_ts):
# We don't have a checkpoint for before our start date so just
# don't constrain the lower date.
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None
else:
materialized_checkpoints = odo(
checkpoints[ts == checkpoints_ts][colnames],
pd.DataFrame,
**odo_kwargs
)
lower = checkpoints_ts
else:
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None # we don't have a good lower date constraint
return lower, materialized_checkpoints |
Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill. | def ffill_query_in_range(expr,
lower,
upper,
checkpoints=None,
odo_kwargs=None,
ts_field=TS_FIELD_NAME):
"""Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill.
"""
odo_kwargs = odo_kwargs or {}
computed_lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints,
expr.fields,
lower,
odo_kwargs,
)
pred = expr[ts_field] <= upper
if computed_lower is not None:
# only constrain the lower date if we computed a new lower date
pred &= expr[ts_field] >= computed_lower
raw = pd.concat(
(
materialized_checkpoints,
odo(
expr[pred],
pd.DataFrame,
**odo_kwargs
),
),
ignore_index=True,
)
raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]')
return raw |
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
assets : pd.Int64Index
    The assets to load data for.
data_query_cutoff_times : pd.DatetimeIndex
    The datetime when data should no longer be considered available for
    a session.
expr : Expr
    The expression representing the data to load.
odo_kwargs : dict
    Extra keyword arguments to pass to odo when executing the expression.
checkpoints : Expr, optional
    The expression representing the checkpointed data for `expr`.
Returns
-------
raw : pd.DataFrame
    The result of computing expr and materializing the result as a
    DataFrame. | def load_raw_data(assets,
data_query_cutoff_times,
expr,
odo_kwargs,
checkpoints=None):
"""
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
    assets : pd.Int64Index
        The assets to load data for.
    data_query_cutoff_times : pd.DatetimeIndex
        The datetime when data should no longer be considered available for
        a session.
    expr : Expr
        The expression representing the data to load.
    odo_kwargs : dict
        Extra keyword arguments to pass to odo when executing the expression.
    checkpoints : Expr, optional
        The expression representing the checkpointed data for `expr`.
    Returns
    -------
    raw : pd.DataFrame
        The result of computing expr and materializing the result as a
        DataFrame.
"""
lower_dt, upper_dt = data_query_cutoff_times[[0, -1]]
raw = ffill_query_in_range(
expr,
lower_dt,
upper_dt,
checkpoints=checkpoints,
odo_kwargs=odo_kwargs,
)
sids = raw[SID_FIELD_NAME]
raw.drop(
sids[~sids.isin(assets)].index,
inplace=True
)
return raw |
Utility to generate a stream of dates. | def date_gen(start,
end,
trading_calendar,
delta=timedelta(minutes=1),
repeats=None):
"""
Utility to generate a stream of dates.
"""
daily_delta = not (delta.total_seconds()
% timedelta(days=1).total_seconds())
cur = start
if daily_delta:
# if we are producing daily timestamps, we
# use midnight
cur = cur.replace(hour=0, minute=0, second=0,
microsecond=0)
def advance_current(cur):
"""
Advances the current dt skipping non market days and minutes.
"""
cur = cur + delta
currently_executing = \
(daily_delta and (cur in trading_calendar.all_sessions)) or \
(trading_calendar.is_open_on_minute(cur))
if currently_executing:
return cur
else:
if daily_delta:
return trading_calendar.minute_to_session_label(cur)
else:
return trading_calendar.open_and_close_for_session(
trading_calendar.minute_to_session_label(cur)
)[0]
    # Yield trade events, all on trading days, and
    # during trading hours.
while cur < end:
if repeats:
for j in range(repeats):
yield cur
else:
yield cur
cur = advance_current(cur) |
Convert a string in US/Eastern time to UTC | def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC') |
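A quick sanity check for ``to_utc``: 9:31 US/Eastern on a summer date is 13:31
UTC, since EDT is UTC-4.
import pandas as pd
assert to_utc('2014-07-01 9:31') == pd.Timestamp('2014-07-01 13:31', tz='UTC')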
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400 | def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH) |
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')] | def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2)) |
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1. | def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
) |
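For reference, the two flavors of ``product_upper_triangle`` on a short list:
pairs = list(product_upper_triangle([1, 2, 3]))
# [(1, 2), (1, 3), (2, 3)]
pairs_with_diagonal = list(
    product_upper_triangle([1, 2, 3], include_diagonal=True)
)
# [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]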
Return all valid sub-indices of a pandas Index. | def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
) |
Construct an iterable of length-1 strings beginning with `start` and ending
with `stop`.
Parameters
----------
start : str
The first character.
stop : str
The last character.
Returns
-------
chars: iterable[str]
Iterable of strings beginning with start and ending with stop.
Examples
--------
>>> chrange('A', 'C')
['A', 'B', 'C'] | def chrange(start, stop):
"""
Construct an iterable of length-1 strings beginning with `start` and ending
with `stop`.
Parameters
----------
start : str
The first character.
stop : str
The last character.
Returns
-------
chars: iterable[str]
Iterable of strings beginning with start and ending with stop.
Examples
--------
>>> chrange('A', 'C')
['A', 'B', 'C']
"""
return list(map(chr, range(ord(start), ord(stop) + 1))) |
Convert the asset info dataframe into a dataframe of trade data for each
sid, and write to the writer if provided. Write NaNs for locations where
assets did not exist. Return a dict of the dataframes, keyed by sid. | def make_trade_data_for_asset_info(dates,
asset_info,
price_start,
price_step_by_date,
price_step_by_sid,
volume_start,
volume_step_by_date,
volume_step_by_sid):
"""
Convert the asset info dataframe into a dataframe of trade data for each
sid, and write to the writer if provided. Write NaNs for locations where
assets did not exist. Return a dict of the dataframes, keyed by sid.
"""
trade_data = {}
sids = asset_info.index
price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
price_date_deltas = (np.arange(len(dates), dtype=float64) *
price_step_by_date)
prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
for j, sid in enumerate(sids):
start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']]
        # Normalize here so that we still generate non-NaN values on the minutes
# for an asset's last trading day.
for i, date in enumerate(dates.normalize()):
if not (start_date <= date <= end_date):
prices[i, j] = 0
volumes[i, j] = 0
df = pd.DataFrame(
{
"open": prices[:, j],
"high": prices[:, j],
"low": prices[:, j],
"close": prices[:, j],
"volume": volumes[:, j],
},
index=dates,
)
trade_data[sid] = df
return trade_data |
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
np.testing.assert_allclose | def check_allclose(actual,
desired,
rtol=1e-07,
atol=0,
err_msg='',
verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
    np.testing.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
return assert_allclose(
actual,
desired,
atol=atol,
rtol=rtol,
err_msg=err_msg,
verbose=verbose,
) |
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
np.testing.assert_array_equal | def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
    np.testing.assert_array_equal
"""
assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
if isinstance(x, LabelArray):
# Check that both arrays have missing values in the same locations...
assert_array_equal(
x.is_missing(),
y.is_missing(),
err_msg=err_msg,
verbose=verbose,
)
# ...then check the actual values as well.
x = x.as_string_array()
y = y.as_string_array()
elif x.dtype.kind in 'mM':
x_isnat = isnat(x)
y_isnat = isnat(y)
assert_array_equal(
x_isnat,
y_isnat,
err_msg="NaTs not equal",
verbose=verbose,
)
# Fill NaTs with zero for comparison.
x = np.where(x_isnat, np.zeros_like(x), x)
y = np.where(y_isnat, np.zeros_like(y), y)
return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose) |
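``check_arrays`` is intentionally stricter than plain
``np.testing.assert_array_equal``; a small sketch of the extra type checking:
import numpy as np
check_arrays(np.array([1, 2]), np.array([1, 2]))         # passes
try:
    check_arrays(np.array([1, 2]), np.array([1.0, 2.0]))
except AssertionError:
    pass                                                  # dtypes must match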
Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db | def empty_assets_db():
"""Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db
"""
return tmp_assets_db(equities=None) |
Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder | def empty_asset_finder():
"""Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder
"""
return tmp_asset_finder(equities=None) |
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
function will be run by iterating over the ``iterator`` and *unpacking the
values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
*name : iterator[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space | def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
function will be run by iterating over the ``iterator`` and *unpacking the
values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
*name : iterator[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception:
info = sys.exc_info()
if not names:
names = count()
failures.append((dict(zip(names, scope)), info))
if failures:
raise SubTestFailures(*failures)
return wrapped
return dec |
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`. | def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`.
"""
if compare_nat_equal and left is pd.NaT and right is pd.NaT:
return
return pd.util.testing.assert_equal(left, right, msg=msg) |
Return the power set (i.e., the set of all subsets) of entries in `values`. | def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
return concat(combinations(values, i) for i in range(len(values) + 1)) |
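``powerset`` yields tuples ordered by subset size, for example:
assert list(powerset([1, 2])) == [(), (1,), (2,), (1, 2)]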
Helper for converting a dict of strings to a Series of datetimes.
This is just for making the test cases more readable. | def to_series(knowledge_dates, earning_dates):
"""
Helper for converting a dict of strings to a Series of datetimes.
This is just for making the test cases more readable.
"""
return pd.Series(
index=pd.to_datetime(knowledge_dates),
data=pd.to_datetime(earning_dates),
) |
Generate calendars to use as inputs. | def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz='utc')
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
yield (trading_days[trading_days.slice_indexer(start, stop)],) |
A context manager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info. | def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
    A context manager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
yield SimplePipelineEngine(get_loader, calendar, finder) |
Get a boolean value from the environment, making a reasonable attempt to
convert "truthy" values to True and "falsey" values to False.
Strings are coerced to bools using ``json.loads(s.lower())``.
Parameters
----------
name : str
Name of the environment variable.
default : bool, optional
Value to use if the environment variable isn't set. Default is False
env : dict-like, optional
Mapping in which to look up ``name``. This is a parameter primarily for
testing purposes. Default is os.environ.
Returns
-------
value : bool
``env[name]`` coerced to a boolean, or ``default`` if ``name`` is not
in ``env``. | def bool_from_envvar(name, default=False, env=None):
"""
Get a boolean value from the environment, making a reasonable attempt to
convert "truthy" values to True and "falsey" values to False.
Strings are coerced to bools using ``json.loads(s.lower())``.
Parameters
----------
name : str
Name of the environment variable.
default : bool, optional
Value to use if the environment variable isn't set. Default is False
env : dict-like, optional
Mapping in which to look up ``name``. This is a parameter primarily for
testing purposes. Default is os.environ.
Returns
-------
value : bool
``env[name]`` coerced to a boolean, or ``default`` if ``name`` is not
in ``env``.
"""
if env is None:
env = os.environ
value = env.get(name)
if value is None:
return default
try:
# Try to parse as JSON. This makes strings like "0", "False", and
# "null" evaluate as falsey values.
value = json.loads(value.lower())
except ValueError:
# If the value can't be parsed as json, assume it should be treated as
# a string for the purpose of evaluation.
pass
return bool(value) |
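A few illustrative calls to ``bool_from_envvar``, passing ``env`` explicitly
instead of touching ``os.environ``:
assert bool_from_envvar('X', env={'X': '0'}) is False     # JSON 0 is falsey
assert bool_from_envvar('X', env={'X': 'True'}) is True   # 'true' parses
assert bool_from_envvar('X', env={'X': 'yes'}) is True    # non-JSON, truthy str
assert bool_from_envvar('X', env={}) is False             # unset -> default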
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
The decorated test function will be called with the cross-product of all
possible inputs
Examples
--------
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
zipline.testing.subtest | def parameter_space(__fail_fast=_FAIL_FAST_DEFAULT, **params):
"""
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
The decorated test function will be called with the cross-product of all
possible inputs
Examples
--------
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
zipline.testing.subtest
"""
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == 'self':
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
unspecified = set(argnames) - set(params)
if unspecified:
raise AssertionError(
"Function arguments %s were not "
"supplied to parameter_space()." % unspecified
)
def make_param_sets():
return product(*(params[name] for name in argnames))
def clean_f(self, *args, **kwargs):
try:
f(self, *args, **kwargs)
finally:
self.tearDown()
self.setUp()
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
clean_f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(clean_f)(*args, **kwargs)
return wrapped
return decorator |
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool) | def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
alternating = np.empty(shape, dtype=np.bool)
for row in alternating:
row[::2] = first_value
row[1::2] = not(first_value)
first_value = not(first_value)
return alternating |
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool) | def make_cascading_boolean_array(shape, first_value=True):
"""
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
cascading = np.full(shape, not(first_value), dtype=np.bool)
ending_col = shape[1] - 1
for row in cascading:
if ending_col > 0:
row[:ending_col] = first_value
ending_col -= 1
else:
break
return cascading |
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations. | def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
return np.apply_along_axis(rand.permutation, 1, array) |
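``permute_rows`` shuffles within each row, independently per row but
reproducibly for a given seed:
import numpy as np
a = np.arange(6).reshape(2, 3)
assert (np.sort(permute_rows(0, a), axis=1) == a).all()   # same row contents
assert (permute_rows(0, a) == permute_rows(0, a)).all()   # deterministic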
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case. | def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
return handler |
Write a compressed (gzipped) file to `path`. | def write_compressed(path, content):
"""
Write a compressed (gzipped) file to `path`.
"""
with gzip.open(path, 'wb') as f:
f.write(content) |
Read a compressed (gzipped) file from `path`. | def read_compressed(path):
"""
    Read a compressed (gzipped) file from `path`.
"""
with gzip.open(path, 'rb') as f:
return f.read() |
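A round-trip sketch for the two gzip helpers above, using a temporary
directory:
import os
import shutil
import tempfile
d = tempfile.mkdtemp()
try:
    path = os.path.join(d, 'payload.gz')
    write_compressed(path, b'some bytes')
    assert read_compressed(path) == b'some bytes'
finally:
    shutil.rmtree(d)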
Context manager for patching the operating system environment. | def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
for key, value in values.iteritems():
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
for old_key, old_value in old_values.iteritems():
if old_value is None:
# Value was not present when we entered, so del it out if it's
# still present.
try:
                    del os.environ[old_key]
except KeyError:
pass
else:
# Restore the old value.
os.environ[old_key] = old_value |
Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``. | def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
'attempted to call read_csv on %r which is not in the url map' %
filepath_or_buffer,
)
with patch.object(module, 'read_csv', patched_read_csv):
yield |
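# Usage sketch, assuming ``patch_read_csv`` is wrapped with
# contextlib.contextmanager. The URL is a placeholder that is redirected to an
# in-memory buffer, so no network access happens.
from io import StringIO
import pandas as pd

fake_url = 'https://example.com/prices.csv'
buffer = StringIO('sid,close\n1,10.5\n2,11.0\n')
with patch_read_csv({fake_url: buffer}, strict=True):
    frame = pd.read_csv(fake_url)
assert list(frame.columns) == ['sid', 'close']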
Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged. | def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
sys._getframe(2).f_globals.setdefault('__test__', {})[
f.__name__ if name is None else name
] = f
return f |
Construct the time series of prices that produce the given returns.
Parameters
----------
returns : np.ndarray[float]
The returns that these prices generate.
starting_price : float
The starting price of the asset; this becomes ``prices[0]``.
Returns
-------
prices : np.ndarray[float]
The prices that generate the given returns. This array will be one
element longer than ``returns`` and ``prices[0] == starting_price``. | def prices_generating_returns(returns, starting_price):
"""Construct the time series of prices that produce the given returns.
Parameters
----------
returns : np.ndarray[float]
The returns that these prices generate.
starting_price : float
The starting price of the asset; this becomes ``prices[0]``.
Returns
-------
prices : np.ndarray[float]
The prices that generate the given returns. This array will be one
element longer than ``returns`` and ``prices[0] == starting_price``.
"""
raw_prices = starting_price * (1 + np.append([0], returns)).cumprod()
rounded_prices = raw_prices.round(3)
if not np.allclose(raw_prices, rounded_prices):
raise ValueError(
'Prices only have 3 decimal places of precision. There is no valid'
' price series that generates these returns.',
)
return rounded_prices |
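# Worked example: these returns round-trip exactly at three decimal places of
# price precision, so no ValueError is raised.
import numpy as np

prices = prices_generating_returns(np.array([0.01, -0.02]), 10.0)
# prices -> array([10.   , 10.1  ,  9.898])
assert np.allclose(np.diff(prices) / prices[:-1], [0.01, -0.02])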
Construct a time series of prices that ticks by a random multiple of
``tick_size`` every period.
Parameters
----------
starting_price : float
The first price of the series.
count : int
Number of price observations to return.
tick_size : float
Unit of price movement between observations.
tick_range : (int, int)
Pair of (inclusive lower, exclusive upper) bounds on the number of
ticks between consecutive price observations.
seed : int, optional
Seed to use for random number generation. | def random_tick_prices(starting_price,
count,
tick_size=0.01,
tick_range=(-5, 7),
seed=42):
"""
Construct a time series of prices that ticks by a random multiple of
``tick_size`` every period.
Parameters
----------
starting_price : float
The first price of the series.
count : int
Number of price observations to return.
tick_size : float
Unit of price movement between observations.
tick_range : (int, int)
Pair of (inclusive lower, exclusive upper) bounds on the number of
ticks between consecutive price observations.
seed : int, optional
Seed to use for random number generation.
"""
out = np.full(count, starting_price, dtype=float)
rng = np.random.RandomState(seed)
diff = rng.randint(tick_range[0], tick_range[1], size=len(out) - 1)
ticks = starting_price + diff.cumsum() * tick_size
out[1:] = ticks
return out |
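# Usage sketch: every consecutive difference is a whole number of ticks drawn
# from the half-open range [-5, 7), so all prices sit on the one-cent grid.
import numpy as np

prices = random_tick_prices(100.0, count=20, tick_size=0.01, seed=0)
steps = np.round(np.diff(prices) / 0.01).astype(int)
assert ((steps >= -5) & (steps < 7)).all()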
Generate a random walk of minute returns which meets the given OHLCV
profile for an asset. The volume will be evenly distributed through the
day.
Parameters
----------
open_ : float
The day's open.
high : float
The day's high.
low : float
The day's low.
close : float
The day's close.
volume : float
The day's volume.
trading_minutes : int, optional
The number of minutes to simulate.
random_state : numpy.random.RandomState, optional
The random state to use. If not provided, the global numpy state is
used. | def simulate_minutes_for_day(open_,
high,
low,
close,
volume,
trading_minutes=390,
random_state=None):
"""Generate a random walk of minute returns which meets the given OHLCV
profile for an asset. The volume will be evenly distributed through the
day.
Parameters
----------
open_ : float
The day's open.
high : float
The day's high.
low : float
The day's low.
close : float
The day's close.
volume : float
The day's volume.
trading_minutes : int, optional
The number of minutes to simulate.
random_state : numpy.random.RandomState, optional
The random state to use. If not provided, the global numpy state is
used.
"""
if random_state is None:
random_state = np.random
sub_periods = 5
values = (random_state.rand(trading_minutes * sub_periods) - 0.5).cumsum()
values *= (high - low) / (values.max() - values.min())
values += np.linspace(
open_ - values[0],
close - values[-1],
len(values),
)
assert np.allclose(open_, values[0])
assert np.allclose(close, values[-1])
max_ = max(close, open_)
where = values > max_
values[where] = (
(values[where] - max_) *
(high - max_) /
(values.max() - max_) +
max_
)
min_ = min(close, open_)
where = values < min_
values[where] = (
(values[where] - min_) *
(low - min_) /
(values.min() - min_) +
min_
)
if not (np.allclose(values.max(), high) and
np.allclose(values.min(), low)):
return simulate_minutes_for_day(
open_,
high,
low,
close,
volume,
trading_minutes,
random_state=random_state,
)
prices = pd.Series(values.round(3)).groupby(
np.arange(trading_minutes).repeat(sub_periods),
)
base_volume, remainder = divmod(volume, trading_minutes)
volume = np.full(trading_minutes, base_volume, dtype='int64')
volume[:remainder] += 1
# Aggregate the sub-period prices into one OHLCV bar per minute.
return pd.DataFrame({
'open': prices.first(),
'close': prices.last(),
'high': prices.max(),
'low': prices.min(),
'volume': volume,
}) |
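# Usage sketch: the simulated minute bars aggregate back to the requested
# daily values, and the day's volume is spread evenly across the minutes.
import numpy as np

minutes = simulate_minutes_for_day(
    open_=100.0,
    high=101.0,
    low=99.0,
    close=100.5,
    volume=39000,
    random_state=np.random.RandomState(0),
)
assert len(minutes) == 390
assert minutes['volume'].sum() == 39000
assert abs(minutes['high'].max() - 101.0) < 1e-2  # up to 3-decimal rounding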
Create a new pipeline domain with a simple date_range index.
| def create_simple_domain(start, end, country_code):
"""Create a new pipeline domain with a simple date_range index.
"""
return EquitySessionDomain(pd.date_range(start, end), country_code) |
Write an HDF5 file of pricing data using an HDF5DailyBarWriter.
| def write_hdf5_daily_bars(writer,
asset_finder,
country_codes,
generate_data,
generate_currency_codes):
"""Write an HDF5 file of pricing data using an HDF5DailyBarWriter.
"""
for country_code in country_codes:
sids = asset_finder.equities_sids_for_country_code(country_code)
# XXX: The contract for generate_data is that it should return an
# iterator of (sid, df) pairs with an entry for each sid in `sids`, and
# the contract for `generate_currency_codes` is that it should return a
# series indexed by the sids it receives.
#
# Unfortunately, some of our tests that were written before the
# introduction of multiple markets (in particular, the ones that use
# EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE), provide a function that always
# returns the same iterator, regardless of the provided `sids`, which
# means there are cases where the sids in `data` don't match the sids
# in `currency_codes`, which causes an assertion failure in
# `write_from_sid_df_pairs`.
#
# The correct fix for this is to update those old tests to respect
# `sids` (most likely by updating `make_equity_minute_bar_sids` to
# support multiple countries). But that requires updating a lot of
# tests, so for now, we call `generate_data` and use the sids it
# produces to determine what to pass to `generate_currency_codes`.
data = list(generate_data(country_code=country_code, sids=sids))
data_sids = [p[0] for p in data]
currency_codes = generate_currency_codes(
country_code=country_code,
sids=data_sids,
)
writer.write_from_sid_df_pairs(
country_code,
iter(data),
currency_codes=currency_codes,
) |
Build an exchange_info suitable for passing to an AssetFinder from a list
of EquityCalendarDomain. | def exchange_info_for_domains(domains):
"""
Build an exchange_info suitable for passing to an AssetFinder from a list
of EquityCalendarDomain.
"""
return pd.DataFrame.from_records([
{'exchange': domain.calendar.name, 'country_code': domain.country_code}
for domain in domains
]) |
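# Minimal sketch with stand-in domain objects. Real callers would pass
# zipline domain objects exposing the same ``calendar.name`` and
# ``country_code`` attributes used above.
from collections import namedtuple

FakeCalendar = namedtuple('FakeCalendar', 'name')
FakeDomain = namedtuple('FakeDomain', 'calendar country_code')

exchange_info = exchange_info_for_domains([
    FakeDomain(FakeCalendar('XNYS'), 'US'),
    FakeDomain(FakeCalendar('XLON'), 'GB'),
])
assert set(exchange_info['country_code']) == {'US', 'GB'}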