# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import unittest.mock as mk
from astropy.modeling.bounding_box import (_BaseInterval, _Interval, _ignored_interval,
_BoundingDomain, ModelBoundingBox,
_BaseSelectorArgument, _SelectorArgument, _SelectorArguments,
CompoundBoundingBox)
from astropy.modeling.models import Gaussian1D, Gaussian2D, Shift, Scale, Identity
from astropy.modeling.core import Model, fix_inputs
from astropy.coordinates import SpectralCoord
import astropy.units as u
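# Tests for astropy.modeling.bounding_box: _Interval creation and validation,
# the abstract _BoundingDomain machinery, ModelBoundingBox, and the selector
# arguments that back CompoundBoundingBox.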
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == \
f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
        # Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
        # Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
message = "An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1*u.m, 2*u.m))
interval._validate_shape([1*u.m, 2*u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1*u.m, 2*u.m),))
interval._validate_shape(([1*u.m, 2*u.m],))
interval._validate_shape([(1*u.m, 2*u.m)])
interval._validate_shape([[1*u.m, 2*u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
# Fails shape (no units)
with pytest.raises(ValueError) as err:
interval._validate_shape((1, 2, 3))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1)
assert str(err.value) == message
# Fails shape (units)
message = "An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError) as err:
interval._validate_shape((1*u.m, 2*u.m, 3*u.m))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1*u.m, 2*u.m, 3*u.m])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1*u.m, 2*u.m, 3*u.m], [4*u.m, 5*u.m, 6*u.m]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1*u.m)
assert str(err.value) == message
# Fails shape (arrays):
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]),
np.array([2.5, 3.5]),
np.array([3, 4])))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
assert str(err.value) == message
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1*u.m, 2*u.m) == (1*u.m, 2*u.m)
interval = _Interval._validate_bounds(np.array([-2.5, -3.5]), np.array([2.5, 3.5]))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(RuntimeWarning,
match="Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\."):
_Interval._validate_bounds(2, 1)
with pytest.warns(RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\."):
_Interval._validate_bounds(2*u.m, 1*u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1*u.m, 2*u.m)) == (1*u.m, 2*u.m)
assert _Interval.validate([1*u.m, 2*u.m]) == (1*u.m, 2*u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1*u.m, 2*u.m),)) == (1*u.m, 2*u.m)
assert _Interval.validate(([1*u.m, 2*u.m],)) == (1*u.m, 2*u.m)
assert _Interval.validate([(1*u.m, 2*u.m)]) == (1*u.m, 2*u.m)
assert _Interval.validate([[1*u.m, 2*u.m]]) == (1*u.m, 2*u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]),
np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
with pytest.raises(ValueError):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
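        # np.linspace(-1, 2, 13) steps by 0.25; indices 4-8 hold 0.0 through 1.0,
        # so only those five points fall inside [0, 1] (the bounds count as inside).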
assert (interval.outside(np.linspace(-1, 2, 13)) ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
def test_domain(self):
interval = _Interval.validate((0, 1))
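        # A resolution of 0.25 across [0, 1] samples 5 evenly spaced points:
        # 0.0, 0.25, 0.5, 0.75, 1.0 -- i.e. np.linspace(0, 1, 5).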
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
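        # The ignored interval spans (-inf, inf), so any finite value, however
        # large, lies strictly inside it and is never flagged as outside.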
for num in [0, -1, -100, 3.14, 10**100, -10**100]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
class Test_BoundingDomain:
def setup(self):
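        # _BoundingDomain declares fix_inputs and prepare_inputs abstract, so a
        # minimal concrete subclass is needed; its overrides just forward to the
        # base-class methods so their error-raising paths remain reachable.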
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
bounding_box = self.BoundingDomain(model, order='F')
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
bounding_box = self.BoundingDomain(Gaussian2D(), ['x'])
assert bounding_box._ignored == [0]
assert bounding_box._order == 'C'
# Error
with pytest.raises(ValueError):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order='C')
assert bounding_box._order == 'C'
assert bounding_box.order == 'C'
bounding_box = self.BoundingDomain(mk.MagicMock(), order='F')
assert bounding_box._order == 'F'
assert bounding_box.order == 'F'
bounding_box._order = 'test'
assert bounding_box.order == 'test'
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == 'C'
assert bounding_box._get_order() == 'C'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Success (default 'F')
bounding_box._order = 'F'
assert bounding_box._order == 'F'
assert bounding_box._get_order() == 'F'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Error
order = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_order(order)
assert str(err.value) ==\
"order must be either 'C' (C/python order) or " +\
f"'F' (Fortran/mathematical order), got: {order}."
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index('x') == 0
assert bounding_box._get_index('y') == 1
# Pass invalid input name
with pytest.raises(ValueError) as err:
bounding_box._get_index('z')
assert str(err.value) ==\
"'z' is not one of the inputs: ('x', 'y')."
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = "Integer key: 2 must be non-negative and < 2."
with pytest.raises(IndexError) as err:
bounding_box._get_index(2)
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int32(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int64(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(-1)
assert str(err.value) ==\
"Integer key: -1 must be non-negative and < 2."
# Pass invalid key
value = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_index(value)
assert str(err.value) ==\
f"Key value: {value} must be string or integer."
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(['x', 'y']) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(ValueError):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError):
bounding_box._validate_ignored(['z'])
with pytest.raises(IndexError):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
with pytest.raises(RuntimeError) as err:
bounding_box(*args, **kwargs)
assert str(err.value) ==\
"This bounding box is fixed by the model and does not have " +\
"adjustable parameters."
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(NotImplementedError) as err:
bounding_box.fix_inputs(model, fixed_inputs)
assert str(err.value) ==\
"This should be implemented by a child class."
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This has not been implemented for BoundingDomain."
    def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.asanyarray(0)) as mkBase:
assert (np.array([1, 2, 3]) ==
bounding_box._modify_output([1, 2, 3], valid_index, input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
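        # _base_output supplies the fill array [1..6]; _modify_output overwrites
        # the entries at valid_index (0, 2, 4) with the valid outputs (7, 8, 9)
        # and leaves the fill values everywhere else.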
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6])) as mkBase:
assert (np.array([7, 2, 8, 4, 9, 6]) ==
bounding_box._modify_output([7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_BoundingDomain, '_modify_output', autospec=True,
side_effect=effects) as mkModify:
assert effects == bounding_box._prepare_outputs(valid_outputs, valid_index,
input_shape, fill_value)
assert mkModify.call_args_list == \
[mk.call(bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value)
for idx in range(3)]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_prepare_outputs', autospec=True) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == \
bounding_box.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, [valid_outputs], valid_index, input_shape, fill_value)]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == \
bounding_box.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, valid_outputs, valid_index, input_shape, fill_value)]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_get_valid_outputs_unit',
autospec=True) as mkGet:
with mk.patch.object(_BoundingDomain, 'prepare_outputs',
autospec=True) as mkPrepare:
assert bounding_box._evaluate_model(evaluate, valid_inputs,
valid_index, input_shape,
fill_value, with_units) == \
(mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, evaluate.return_value, valid_index,
input_shape, fill_value)]
assert mkGet.call_args_list == \
[mk.call(evaluate.return_value, with_units)]
assert evaluate.call_args_list == \
[mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [(valid_inputs, valid_index, True), (valid_inputs, valid_index, False)]
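        # prepare_inputs is patched to report all_out=True on the first call and
        # False on the second, exercising the _all_out_output short-circuit first
        # and then the normal _evaluate_model path.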
with mk.patch.object(self.BoundingDomain, 'prepare_inputs', autospec=True,
side_effect=effects) as mkPrepare:
with mk.patch.object(_BoundingDomain, '_all_out_output',
autospec=True) as mkAll:
with mk.patch.object(_BoundingDomain, '_evaluate_model',
autospec=True) as mkEvaluate:
# all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == \
mkAll.return_value
assert mkAll.call_args_list == \
[mk.call(bounding_box, input_shape, fill_value)]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, input_shape, inputs)]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == \
mkEvaluate.return_value
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == \
[mk.call(bounding_box, evaluate, valid_inputs, valid_index,
input_shape, fill_value, with_units)]
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, input_shape, inputs)]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert 27 == bounding_box._set_outputs_unit(27, None)
# set unit
assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(_BoundingDomain, '_evaluate',
autospec=True, return_value=value) as mkEvaluate:
with mk.patch.object(_BoundingDomain, '_set_outputs_unit',
autospec=True) as mkSet:
with mk.patch.object(Model, 'input_shape', autospec=True) as mkShape:
with mk.patch.object(Model, 'bbox_with_units',
new_callable=mk.PropertyMock) as mkUnits:
assert tuple(mkSet.return_value) == \
bounding_box.evaluate(evaluate, inputs, fill_value)
assert mkSet.call_args_list == \
[mk.call(outputs, valid_outputs_unit)]
assert mkEvaluate.call_args_list == \
[mk.call(bounding_box, evaluate, inputs, mkShape.return_value,
fill_value, mkUnits.return_value)]
assert mkShape.call_args_list == \
[mk.call(bounding_box._model, inputs)]
assert mkUnits.call_args_list == [mk.call()]
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ['x', 'y']
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ['x', 'y', 'z']
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)))
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
            # Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
            # Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.__repr__() ==\
"ModelBoundingBox(\n" +\
" intervals={\n" +\
" x: Interval(lower=-1, upper=1)\n" +\
" y: Interval(lower=-4, upper=4)\n" +\
" }\n" +\
" model=Gaussian2D(inputs=('x', 'y'))\n" +\
" order='C'\n" +\
")"
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box.__repr__() ==\
"ModelBoundingBox(\n" +\
" intervals={\n" +\
" x: Interval(lower=-1, upper=1)\n" +\
" }\n" +\
" ignored=['y']\n" +\
" model=Gaussian2D(inputs=('x', 'y'))\n" +\
" order='C'\n" +\
")"
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box['y']
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box['x'] == (-1, 1)
assert bounding_box['y'] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError):
bounding_box['z']
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
with pytest.raises(IndexError):
bounding_box[2]
with pytest.raises(IndexError):
bounding_box[np.int32(2)]
with pytest.raises(IndexError):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x'])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box('C') == (-np.inf, np.inf)
assert bounding_box.bounding_box('F') == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
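        # For multiple inputs, 'C' (python/numpy) order lists the intervals
        # last-input first (y, then x), while 'F' (Fortran/mathematical) order
        # lists them in input order (x, then y).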
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('C') == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('F') == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}))
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order='F')
assert bounding_box_1._order == 'C'
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == 'F'
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
    def test___setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert 'x' not in bounding_box
bounding_box['x'] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert 'x' not in bounding_box
bounding_box['x'] = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box[0]
assert str(err.value) ==\
"Cannot delete ignored input: 0!"
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
del bounding_box['y']
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box['y']
assert str(err.value) ==\
"Cannot delete ignored input: y!"
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {'x': _Interval(-1, 1), 'y': _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {'x': _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 'x' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='C')
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
order = mk.MagicMock()
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=order)
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x', 'y'])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ['x', 'y', 'z']
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
bounding_box._intervals = {}
bounding_box._ignored = []
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert str(err.value) ==\
"Found 3 intervals, but must have exactly 2."
assert len(bounding_box.intervals) == 0
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) ==\
"Found 2 intervals, but must have exactly 1."
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) ==\
"Found 1 intervals, but must have exactly 2."
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert 'x' not in bounding_box
bounding_box._validate((-1, 1))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' not in bounding_box
bounding_box._validate(sequence)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {'test': mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'], **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
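        # fix_inputs drops the fixed input's interval from the new bounding box;
        # the private _keep_ignored=True flag instead keeps the input around,
        # marked as ignored.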
# keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
# keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(Gaussian2D(), {1: mk.MagicMock()},
_keep_ignored=True)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' in new_bounding_box
assert 'y' in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (np.array(bounding_box.domain(0.25)) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test C order
assert (np.array(bounding_box.domain(0.25, 'C')) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test Fortran order
assert (np.array(bounding_box.domain(0.25, 'F')) ==
np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])).all()
# test error order
order = mk.MagicMock()
with pytest.raises(ValueError):
bounding_box.domain(0.25, order)
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
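        # x = linspace(-2, 1, 13) is below -1 at indices 0-3 and
        # y = linspace(0, 3, 13) is above 2 at indices 9-12, so only
        # indices 4-8 pass both interval checks.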
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) ==
np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, 'x') == (0, True)
assert _SelectorArgument.validate(model, 'y') == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, 'x', ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 'y', ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError):
_SelectorArgument.validate(model, 'z')
with pytest.raises(ValueError):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(IndexError):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# multi entry numpy array
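        # Multi-element selector values come back as plain tuples, presumably so
        # they remain hashable (e.g. usable as CompoundBoundingBox keys).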
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index]
def test_pretty_repr(self):
model = Gaussian2D()
assert _SelectorArgument(0, False).pretty_repr(model) ==\
"Argument(name='x', ignore=False)"
assert _SelectorArgument(0, True).pretty_repr(model) ==\
"Argument(name='x', ignore=True)"
assert _SelectorArgument(1, False).pretty_repr(model) ==\
"Argument(name='y', ignore=False)"
assert _SelectorArgument(1, True).pretty_repr(model) ==\
"Argument(name='y', ignore=True)"
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, 'y': 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
values = {0: 5}
with pytest.raises(RuntimeError) as err:
_SelectorArgument(1, True).get_fixed_value(model, values)
assert str(err.value) == \
"Argument(name='y', ignore=True) was not found in {0: 5}"
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
        assert argument.is_argument(model, 0) is True
        assert argument.is_argument(model, 'x') is True
        # Is false
        assert argument.is_argument(model, 1) is False
        assert argument.is_argument(model, 'y') is False
# Fail
with pytest.raises(ValueError):
argument.is_argument(model, 'z')
with pytest.raises(ValueError):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(IndexError):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == \
(model.inputs[index], ignore)
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert arguments.pretty_repr(model) ==\
"SelectorArguments(\n" +\
" Argument(name='x', ignore=True)\n" +\
" Argument(name='y', ignore=False)\n" +\
")"
def test_ignore(self):
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True))).ignore == [0, 1]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True)), [13, 4]).ignore == [0, 1, 13, 4]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False))).ignore == [0]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, True))).ignore == [1]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False))).ignore == []
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False)), [17, 14]).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), (('x', True), ('y', False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
        new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
        new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ('z', False)))
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((mk.MagicMock(), True), (1, False)))
with pytest.raises(IndexError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
assert str(err.value) == \
"Input: 'x' has been repeated."
# Invalid, no arguments
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ())
assert str(err.value) == \
"There must be at least one selector argument."
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).get_selector(*inputs) ==\
tuple(inputs[:2])
assert _SelectorArguments.validate(Gaussian2D(),
((1, True), (0, False))).get_selector(*inputs) ==\
tuple(inputs[:2][::-1])
assert _SelectorArguments.validate(Gaussian2D(),
((1, False),)).get_selector(*inputs) ==\
(inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).get_selector(*inputs) ==\
(inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5,))
# Is not selector
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5,))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5, 2.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector(2.5)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {0: 11, 1: 7}) \
== (11, 7)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {0: 5, 'y': 47}) \
== (5, 47)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {'x': 2, 'y': 9}) \
== (2, 9)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {'x': 12, 1: 19}) \
== (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
        assert arguments.is_argument(model, 0) is True
        assert arguments.is_argument(model, 'x') is True
        assert arguments.is_argument(model, 1) is True
        assert arguments.is_argument(model, 'y') is True
        # Is true and false
        arguments = _SelectorArguments.validate(model, ((0, True),))
        assert arguments.is_argument(model, 0) is True
        assert arguments.is_argument(model, 'x') is True
        assert arguments.is_argument(model, 1) is False
        assert arguments.is_argument(model, 'y') is False
        arguments = _SelectorArguments.validate(model, ((1, False),))
        assert arguments.is_argument(model, 0) is False
        assert arguments.is_argument(model, 'x') is False
        assert arguments.is_argument(model, 1) is True
        assert arguments.is_argument(model, 'y') is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, 'x') == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, 'y') == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, 'x') == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, 'y') == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(ValueError) as err:
arguments.selector_index(model, 'y')
assert str(err.value) ==\
"y does not correspond to any selector argument."
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), ))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
        assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, 'y')
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
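        # Note: ignoring the same input twice (first by index, then by name)
        # appends it again, so _kept_ignore can hold duplicates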
assert arguments._kept_ignore == []
# Error
with pytest.raises(ValueError) as err:
arguments.add_ignore(model, 0)
assert str(err.value) ==\
"0: is a selector argument and cannot be ignored."
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'x')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'y')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (('x', True), ('y', False))
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args, create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(Gaussian2D(), {(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),), mk.MagicMock())
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(copy.bounding_boxes[selector].intervals[index])
                # Same float values will have the same id
assert interval.lower == copy.bounding_boxes[selector].intervals[index].lower
assert id(interval.lower) == id(copy.bounding_boxes[selector].intervals[index].lower)
                # Same float values will have the same id
assert interval.upper == copy.bounding_boxes[selector].intervals[index].upper
assert id(interval.upper) == id(copy.bounding_boxes[selector].intervals[index].upper)
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box.__repr__() ==\
"CompoundBoundingBox(\n" + \
" bounding_boxes={\n" + \
" (1,) = ModelBoundingBox(\n" + \
" intervals={\n" + \
" x: Interval(lower=-1, upper=1)\n" + \
" }\n" + \
" model=Gaussian2D(inputs=('x', 'y'))\n" + \
" order='C'\n" + \
" )\n" + \
" (2,) = ModelBoundingBox(\n" + \
" intervals={\n" + \
" x: Interval(lower=-2, upper=2)\n" + \
" }\n" + \
" model=Gaussian2D(inputs=('x', 'y'))\n" + \
" order='C'\n" + \
" )\n" + \
" }\n" + \
" selector_args = SelectorArguments(\n" + \
" Argument(name='x', ignore=True)\n" + \
" )\n" + \
")"
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
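        # _get_selector_key normalizes scalar, sequence, and numpy array
        # inputs into plain tuples matching the bounding_boxes dict keys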
        # Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
# multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(7, 13)] = (-7, 7)
assert str(err.value) == \
"(7, 13) is not a selector!"
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
        assert (13,) not in bounding_box._bounding_boxes
        with pytest.raises(ValueError):
            bounding_box[(13,)] = ((-13, 13), (-3, 3))
        assert (13,) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert str(err.value) == \
"(14, 11) is not a selector!"
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
        assert (13,) not in bounding_box._bounding_boxes
        with pytest.raises(ValueError):
            bounding_box[(13,)] = (-13, 13)
        assert (13,) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),))
bounding_box_2 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),))
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, False),))
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
with pytest.raises(ValueError) as err:
CompoundBoundingBox.validate(model, bounding_boxes)
assert str(err.value) ==\
"Selector arguments must be provided (can be passed as part of bounding_box argument)!"
# Normal validate
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == 'F'
# Default order
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'C'
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),),
create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(ValueError):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(RuntimeError) as err:
bounding_box[(3,)]
assert str(err.value) == \
"No bounding box is defined for selector: (3,)."
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(CompoundBoundingBox, '_create_bounding_box',
autospec=True) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == \
[mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_SelectorArguments, 'get_selector',
autospec=True) as mkSelector:
with mk.patch.object(CompoundBoundingBox, '__getitem__',
autospec=True) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == \
[mk.call(bounding_box, mkSelector.return_value)]
assert mkSelector.call_args_list == \
[mk.call(bounding_box.selector_args, *inputs)]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(ModelBoundingBox, 'prepare_inputs',
autospec=True) as mkPrepare:
assert bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == \
[mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])]
mkPrepare.reset_mock()
assert bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == \
[mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])]
mkPrepare.reset_mock()
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {(1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {(1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
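        # With keys (1, 3), (2, 2), (3, 1), fixing one selector at `value`
        # leaves (4 - value,) as the remaining matching key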
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes('y', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox
assert 'y' in bbox.ignored_inputs
assert 'x' in bbox
assert bbox['x'] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
        # Realistic example: fixing a slit_id slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
matching = bounding_box._matching_bounding_boxes('slit_id', 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
matching = bounding_box._matching_bounding_boxes('slit_id', 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
# Errors
with pytest.raises(ValueError) as err:
bounding_box._matching_bounding_boxes('slit_id', 2)
assert str(err.value) ==\
"Attempting to fix input slit_id, but there are no bounding boxes for argument value 2."
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {(1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {(1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
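        # Same key pattern as above: fixing one selector at `value` leaves
        # (4 - value,) as the remaining selector key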
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox_selector
assert 'x' in bbox_selector.ignored_inputs
assert 'y' in bbox_selector
assert bbox_selector['y'] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg('y', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox_selector
assert 'y' in bbox_selector.ignored_inputs
assert 'x' in bbox_selector
assert bbox_selector['x'] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
        # Realistic example: fixing a slit_id slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
bbox = bounding_box._fix_input_selector_arg('slit_id', 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
bbox = bounding_box._fix_input_selector_arg('slit_id', 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
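        # Fixing a non-selector input drops its interval from every sub-box
        # and records that input's index in the selector's kept_ignore list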
bbox = bounding_box._fix_input_bbox_arg('x', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg('y', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {'slit_id': 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
# Fix a bounding_box field
new_model = fix_inputs(model, {'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {'y': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {'slit_id': 0, 'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
new_model = fix_inputs(model, {'y': 5, 'slit_id': 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 3047.5)}
assert bbox.order == 'F'
# Fix two bounding_box fields
new_model = fix_inputs(model, {'x': 5, 'y': 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2

# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, pointless-statement
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.modeling.core import Model, ModelDefinitionError, CompoundModel
from astropy.modeling.parameters import Parameter
from astropy.modeling.models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D,
Gaussian2D, Polynomial1D, Polynomial2D,
Chebyshev2D, Legendre2D, Chebyshev1D, Legendre1D,
Identity, Mapping, Linear1D,
Tabular1D, fix_inputs,)
from astropy.modeling.fitting import LevMarLSQFitter
import astropy.units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
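    # model_set_axis=False broadcasts the single scalar input across both
    # models in the set, giving one output per model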
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
with pytest.raises(ValueError):
expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, CompoundModel)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
def test_simple_two_model_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift(2) | Scale(3) # First shift then scale
assert isinstance(S1, CompoundModel)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
assert S1(1) == 9.0
S2 = Scale(2) | Shift(3) # First scale then shift
assert isinstance(S2, CompoundModel)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
assert S2(1) == 5.0
# Test with array inputs
assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0])
def test_simple_two_model_compose_2d():
"""
A simple example consisting of two rotations.
"""
r1 = Rotation2D(45) | Rotation2D(45)
assert isinstance(r1, CompoundModel)
assert r1.n_inputs == 2
assert r1.n_outputs == 2
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
r3 = r1 | r1
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels == 3
g5 = g3 | g2
assert g5.n_submodels == 5
g7 = g5 / g2
assert g7.n_submodels == 7
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
    # For the purposes of this test it doesn't matter a great deal which
    # model(s) are used in the expression.
G = Gaussian1D(1, 1, 1)
G2 = Gaussian2D(1, 2, 3, 4, 5, 6)
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
rs = Rotation2D(90) | Mapping((1, 0))
x_prime, y_prime = rs(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
m = Rotation2D(90) & Scale(2)
x, y, z = m(1, 2, 3)
ms = m | Mapping((2, 0, 1))
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
rs1 = Rotation2D(12.1) & Scale(13.2)
rs2 = Rotation2D(14.3) & Scale(15.4)
# Rotates 2 of the coordinates and scales the third--then rotates on a
    # different axis and scales on the axis of rotation. No physical meaning
    # here, just a simple test.
m = rs1 | Mapping([2, 0, 1]) | rs2
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
def test_fix_inputs():
g1 = Gaussian2D(1, 0, 0, 1, 2)
g2 = Gaussian2D(1.5, .5, -.2, .5, .3)
sg1_1 = fix_inputs(g1, {1: 0})
assert_allclose(sg1_1(0), g1(0, 0))
assert_allclose(sg1_1([0, 1, 3]), g1([0, 1, 3], [0, 0, 0]))
sg1_2 = fix_inputs(g1, {'x': 1})
assert_allclose(sg1_2(1.5), g1(1, 1.5))
gg1 = g1 & g2
sgg1_1 = fix_inputs(gg1, {1: 0.1, 3: 0.2})
assert_allclose(sgg1_1(0, 0), gg1(0, 0.1, 0, 0.2))
sgg1_2 = fix_inputs(gg1, {'x0': -.1, 2: .1})
assert_allclose(sgg1_2(1, 1), gg1(-0.1, 1, 0.1, 1))
assert_allclose(sgg1_2(y0=1, y1=1), gg1(-0.1, 1, 0.1, 1))
def test_fix_inputs_invalid():
g1 = Gaussian2D(1, 0, 0, 1, 2)
with pytest.raises(ValueError):
fix_inputs(g1, {'x0': 0, 0: 0})
with pytest.raises(ValueError):
fix_inputs(g1, (0, 1))
with pytest.raises(ValueError):
fix_inputs(g1, {3: 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int32(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int64(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {'w': 2})
with pytest.raises(ModelDefinitionError):
CompoundModel('#', g1, g1)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {0: 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int32(0): 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int64(0): 1})
gg1(2, y=2)
def test_fix_inputs_with_bounding_box():
g1 = Gaussian2D(1, 0, 0, 1, 1)
g2 = Gaussian2D(1, 0, 0, 1, 1)
assert g1.bounding_box == ((-5.5, 5.5), (-5.5, 5.5))
gg1 = g1 & g2
gg1.bounding_box = ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
assert gg1.bounding_box == ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
sg = fix_inputs(gg1, {0: 0, 2: 0})
assert sg.bounding_box == ((-5.5, 5.5), (-5.3, 5.3))
g1 = Gaussian1D(10, 3, 1)
g = g1 & g1
g.bounding_box = ((1, 4), (6, 8))
gf = fix_inputs(g, {0: 1})
assert gf.bounding_box == (1, 4)
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
m = Gaussian1D(1, 0, 0.1) + Const1D(2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert m.param_names == ('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1')
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# Confirm index-by-name works with fix_inputs
g = Gaussian2D(1, 2, 3, 4, 5, name='g')
m = fix_inputs(g, {0: 1})
assert m['g'].name == 'g'
# Test string slicing
A = Const1D(1.1, name='A')
B = Const1D(2.1, name='B')
C = Const1D(3.1, name='C')
M = A + B * C
assert_allclose(M['B':'C'](1), 6.510000000000001)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
def test_inherit_constraints():
"""
Various tests for copying of constraint values between compound models and
their members.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
model = (Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}))
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model.stddev_0 is model[0].stddev
# Great, all the constraints were inherited properly
    # Now, what if we update them through the sub-models?
model.stddev_0.bounds = (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model.stddev_0.bounds = (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
# Now turn off syncing of constraints
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = False
model[0].stddev.bounds = (0, 0.2)
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = True
assert model.bounds['stddev_0'] == (0, 0.2)
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
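    # Inverting shift | model1 picks up model1's custom inverse (poly),
    # composed with shift.inverse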
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert m(1) == 4
offx.offset = 42
assert m(1) == 86
m.factor_1 = 100
assert m(1) == 4300
m2 = m | offx
assert m2(1) == 4342
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m.submodel_names == ('None_0', 'scale')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M1"
assert m1.name == "M1"
def test_name_index():
g1 = Gaussian1D(1, 1, 1)
g2 = Gaussian1D(1, 2, 1)
g = g1 + g2
with pytest.raises(IndexError):
g['bozo']
g1.name = 'bozo'
assert g['bozo'].mean == 1
g2.name = 'bozo'
with pytest.raises(IndexError):
g['bozo']
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19],
bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
x = np.arange(12).reshape((3, 4))
    # Create a compound model which does not execute Tabular.__call__,
    # but rather model.evaluate, and is followed by a Rotation2D which
# checks the exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
def test_bounding_box():
g = Gaussian2D() + Gaussian2D(2, .5, .1, 2, 3, 0)
g.bounding_box = ((0, 1), (0, .5))
y, x = np.mgrid[0:10, 0:10]
y = y / 3.
x = x / 3.
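    # The grid now spans [0, 3], so only points inside the bounding box
    # ((0, 1), (0, .5)) are evaluated; everything else is filled with NaN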
val = g(x, y, with_bounding_box=True)
compare = np.array([
[2.93738984, 2.93792011, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.87857153, 2.88188761, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.70492922, 2.71529265, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.45969972, 2.47912103, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]])
mask = ~np.isnan(val)
assert_allclose(val[mask], compare[mask])
val2 = g(x+2, y+2, with_bounding_box=True)
assert np.isnan(val2).sum() == 100
@pytest.mark.skipif("not HAS_SCIPY")
def test_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = Tabular1D(points, lt)
assert t(1 * u.pix, with_bounding_box=True) == 1. * u.AA
@pytest.mark.parametrize('poly', [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials_1d(poly):
"""
Tests that polynomials are offset when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x = np.linspace(-5, 5, 10)
result_compound = model(x)
result = shift(poly(x))
assert_allclose(result, result_compound)
assert model.param_names == ('c0_0', 'c1_0', 'c2_0', 'c3_0', 'c4_0', 'c5_0', 'offset_1')
def test_replace_submodel():
"""
Replace a model in a Compound model
"""
S1 = Shift(2, name='shift2') | Scale(3, name='scale3') # First shift then scale
S2 = Scale(2, name='scale2') | Shift(3, name='shift3') # First scale then shift
m = S1 & S2
assert m(1, 2) == (9, 7)
m2 = m.replace_submodel('scale3', Scale(4, name='scale4'))
assert m2(1, 2) == (12, 7)
assert m(1, 2) == (9, 7)
# Check the inverse has been updated
assert m2.inverse(12, 7) == (1, 2)
# Produce the same result by replacing a single model with a compound
m3 = m.replace_submodel('shift2', Shift(2) | Scale(2))
assert m(1, 2) == (9, 7)
assert m3(1, 2) == (18, 7)
# Check the inverse has been updated
assert m3.inverse(18, 7) == (1, 2)
    # Test with an arithmetic model compounding operator
m = S1 + S2
assert m(1) == 14
m2 = m.replace_submodel('scale2', Scale(4, name='scale4'))
assert m2(1) == 16
# Test with fix_inputs()
R = fix_inputs(Rotation2D(angle=90, name='rotate'), {0: 1})
m4 = S1 | R
assert_allclose(m4(0), (-6, 1))
m5 = m4.replace_submodel('rotate', Rotation2D(180))
assert_allclose(m5(0), (-1, -6))
    # Check we get a ValueError when the model name doesn't exist
with pytest.raises(ValueError):
m2 = m.replace_submodel('not_there', Scale(2))
# And now a model set
P = Polynomial1D(degree=1, n_models=2, name='poly')
S = Shift([1, 2], n_models=2)
m = P | S
assert_array_equal(m([0, 1]), (1, 2))
with pytest.raises(ValueError):
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=1))
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=[1, 2],
n_models=2))
assert_array_equal(m2([0, 1]), (2, 4))
# Ensure previous _user_inverse doesn't stick around
S1 = Shift(1)
S2 = Shift(2)
S3 = Shift(3, name='S3')
S23 = S2 | S3
S23.inverse = Shift(-4.9)
m = S1 & S23
# This should delete the S23._user_inverse
m2 = m.replace_submodel('S3', Shift(4))
assert m2(1, 2) == (2, 8)
assert m2.inverse(2, 8) == (1, 2)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate(expr):
"""
    Tests that the compound evaluate function produces the same
    result as applying the operator to the component models' outputs.
"""
x = np.linspace(-5, 5, 10)
    # Some evaluate functions (Const1D's included) assume that inputs are
    # numpy arrays or Quantities
p1 = np.array([1, 2, 3, 4, 1, 2])
p2 = np.array([1, 0, 0.5])
model1 = Polynomial1D(5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p1, *p2),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_power():
"""
    Tests that the compound evaluate function produces the same
    result as applying the power operator to the component models' outputs.
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(x, *p1, *p2),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_double_shift():
x = np.linspace(-5, 5, 10)
y = np.linspace(-5, 5, 10)
m1 = Gaussian2D(1, 0, 0, 1, 1, 1)
m2 = Shift(1)
m3 = Shift(2)
m = Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2)
assert_array_equal(
m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2),
[
m1.evaluate(x, y, 1, 0, 0, 1, 1, 1),
m2.evaluate(x - 10, 1),
m3.evaluate(y + 20, 2),
],
)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate_named_param(expr):
"""
    Tests that the compound evaluate function produces the same result
    as applying the operator to the component models' outputs, with some
    parameters passed by keyword.
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3, 0.5, 0.5])
model1 = Gaussian1D(2, 1, 5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_name_param_power():
"""
    Tests that the compound evaluate function produces the same result
    as applying the power operator to the component models' outputs, with
    parameters passed by keyword.
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_and():
"""
    Tests that the compound evaluate function produces the same result
    as evaluating the component models of the `&` join separately.
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0.1, 0.5])
p2 = np.array([3])
model1 = Gaussian1D()
model2 = Shift()
compound = model1 & model2
assert_array_equal(
compound.evaluate(x, x, *p1, p2),
[model1.evaluate(x, *p1), model2.evaluate(x, p2)],
)
def test_compound_evaluate_or():
"""
    Tests that the compound evaluate function produces the same result
    as chaining the component models of the `|` composition.
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([0.5])
p2_amplitude = np.array([3])
p2_mean = np.array([0])
p2_std = np.array([0.1])
model1 = Shift(0.5)
model2 = Gaussian1D(1, 0, 0.5)
compound = model1 | model2
assert_array_equal(
compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std),
model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std),
)
def test_compound_evaluate_fix_inputs_by_keyword():
"""
    Tests that the compound evaluate function produces the same result
    as the underlying model with the fix_inputs substitution applied,
    fixing the input by keyword.
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {"x": x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
def test_compound_evaluate_fix_inputs_by_position():
"""
    Tests that the compound evaluate function produces the same result
    as the underlying model with the fix_inputs substitution applied,
    fixing the input by position index.
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {0: x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
truth = m1 * m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 * m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 * m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_divided_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
truth = m1 / m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_mixed_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 / m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 / m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
|
a45e80639e00bb568df9341b8f40440d31c79c92f803b0b6e57b39750d19f341 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import pytest
import numpy as np
from astropy.convolution import convolve_models_fft
from astropy.modeling.models import Const1D, Const2D
try:
import scipy # pylint: disable=W0611 # noqa
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
@pytest.mark.skipif('not HAS_SCIPY')
def test_clear_cache():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
assert model._kwargs is None
assert model._convolution is None
results = model(0)
    assert np.allclose(results, 1.)
assert model._kwargs is not None
assert model._convolution is not None
model.clear_cache()
assert model._kwargs is None
assert model._convolution is None
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_1d():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
results = model(0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x)
assert results.shape == x.shape
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_2d():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
results = model(0, 0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x, 0)
assert results.shape == x.shape
results = model(0, x)
assert results.shape == x.shape
grid = np.meshgrid(x, x)
results = model(*grid)
assert results.shape == grid[0].shape
assert results.shape == grid[1].shape
@pytest.mark.skipif('not HAS_SCIPY')
def test__convolution_inputs():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
x = np.arange(-1, 1, 0.1)
y = np.arange(-2, 2, 0.1)
grid0 = np.meshgrid(x, x)
grid1 = np.meshgrid(y, y)
# scalar inputs
assert (np.array([1]), (1,)) == model._convolution_inputs(1)
# Multiple inputs
assert np.all(model._convolution_inputs(*grid0)[0] ==
np.reshape([grid0[0], grid0[1]], (2, -1)).T)
assert model._convolution_inputs(*grid0)[1] == grid0[0].shape
assert np.all(model._convolution_inputs(*grid1)[0] ==
np.reshape([grid1[0], grid1[1]], (2, -1)).T)
assert model._convolution_inputs(*grid1)[1] == grid1[0].shape
# Error
with pytest.raises(ValueError) as err:
model._convolution_inputs(grid0[0], grid1[1])
assert str(err.value) ==\
"Values have differing shapes"
|
6e83786f8a372f63514c39f3584863443f8147078d04c4da8a66b1af2bcec128 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import Polynomial1D
class _ExampleModel(Model):
n_inputs = 1
n_outputs = 1
def __init__(self):
self._input_units = {"x": u.m}
self._return_units = {"y": u.m/u.s}
super().__init__()
def evaluate(self, input):
return input / u.Quantity(1, u.s)
def _models_with_units():
m1 = _ExampleModel() & _ExampleModel()
m2 = _ExampleModel() + _ExampleModel()
p = Polynomial1D(1)
p._input_units = {'x': u.m / u.s}
p._return_units = {'y': u.m / u.s}
m3 = _ExampleModel() | p
m4 = fix_inputs(m1, {'x0': 1})
m5 = fix_inputs(m1, {0: 1})
models = [m1, m2, m3, m4, m5]
input_units = [{'x0': u.Unit("m"), 'x1': u.Unit("m")},
{'x': u.Unit("m")},
{'x': u.Unit("m")},
{'x1': u.Unit("m")},
{'x1': u.Unit("m")}
]
return_units = [{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")},
{'y': u.Unit("m / s")},
{'y': u.Unit("m / s")},
{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")},
{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")}
]
return np.array([models, input_units, return_units], dtype=object).T
@pytest.mark.parametrize(("model", "input_units", "return_units"), _models_with_units())
def test_input_units(model, input_units, return_units):
""" Test input_units on various compound models."""
assert model.input_units == input_units
assert model.return_units == return_units
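def test_input_units_simple_model_sketch():
    """
    Illustrative sketch (added for clarity; not part of the original
    suite, and assuming ``input_units``/``return_units`` simply expose
    the private dicts set above): a bare _ExampleModel reports the
    units it was constructed with.
    """
    m = _ExampleModel()
    assert m.input_units == {"x": u.m}
    assert m.return_units == {"y": u.m / u.s}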
|
7dbbb893d7cbda1badf5ef9f2c5fb145b41a242692aba97bbe53280342e5b32a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import sys
import subprocess
import pytest
import unittest.mock as mk
import numpy as np
from inspect import signature
from numpy.testing import assert_allclose, assert_equal
import astropy
from astropy.modeling.core import (Model, CompoundModel, custom_model,
SPECIAL_OPERATORS, _add_special_operator,
bind_bounding_box, bind_compound_bounding_box,
fix_inputs)
from astropy.modeling.bounding_box import ModelBoundingBox, CompoundBoundingBox
from astropy.modeling.separable import separability_matrix
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
from astropy.convolution import convolve_models
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
import astropy.modeling.core as core
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (str(m) ==
"Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5")
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametericModel():
with pytest.raises(TypeError):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ('a', 'b')
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ('a', 'b')
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ('a',)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
    Demonstrates that issue #11791's ``n_outputs`` error has been solved
"""
@custom_model
def model(x, y, n_outputs=2):
return x+1, y+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ('x0', 'x1')
assert (separability_matrix(m) == [[True, True],
[True, True]]).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x+1, y+1, z+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ('x0', 'x1', 'x2')
assert (separability_matrix(m) == [[True, True, True],
[True, True, True],
[True, True, True]]).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=('z0', 'z1')):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.outputs == ('z0', 'z1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
m = model(outputs=('w0', 'w1'))
assert m.n_outputs == 2
assert m.outputs == ('w0', 'w1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
def test_custom_model_rejected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, non-settable parameters
    should raise an error (otherwise unexpected behavior may occur).
"""
with pytest.raises(ValueError,
match=r"Parameter 'n_inputs' cannot be a model property: *"):
@custom_model
def model(x, y, n_outputs=2, n_inputs=3):
return x+1, y+1
with pytest.raises(ValueError,
match=r"Parameter 'uses_quantity' cannot be a model property: *"):
@custom_model
def model(x, y, n_outputs=2, uses_quantity=True):
return x+1, y+1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
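    # (the inverse of y = 3*x - 2 is x = (y + 2)/3 = 2/3 + y/3)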
inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))
with pytest.raises(NotImplementedError):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ('y',)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
# test an error is raised when the bounding box is larger than the input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, .2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings('ignore:invalid value encountered in less')
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename('CustomGaussian')
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmpdir):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert repr(RENAMED_MODEL).splitlines()[0] == "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env['PYTHONPATH'] = os.pathsep.join(paths)
script = tmpdir.join('rename.py').strpath
with open(script, 'w') as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize('model_class',
[models.Gaussian1D, models.Polynomial1D,
models.Shift, models.Tabular1D])
def test_rename_1d(model_class):
new_model = model_class.rename(name='Test1D')
assert new_model.name == 'Test1D'
@pytest.mark.parametrize('model_class',
[models.Gaussian2D, models.Polynomial2D, models.Tabular2D])
def test_rename_2d(model_class):
new_model = model_class.rename(name='Test2D')
assert new_model.name == 'Test2D'
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
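def test_fix_inputs_reduces_n_inputs_sketch():
    """
    Illustrative sketch (added for clarity; not part of the original
    suite): fixing an input removes it from the resulting model's call
    signature while the fixed value is still produced on output.
    """
    m = models.Identity(2)
    mf = models.fix_inputs(m, {1: 7})
    assert (m.n_inputs, mf.n_inputs) == (2, 1)
    assert mf(3) == (3, 7)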
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
with pytest.raises(ValueError):
g2.inputs = ("w", )
with pytest.raises(ValueError):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (np.array([1, 2]) ==
model._prepare_output_single_model(np.array([1, 2]), None)).all()
# Broadcast to scalar
assert 1 == model._prepare_output_single_model(np.array([1]), ())
assert 2 == model._prepare_output_single_model(np.asanyarray(2), ())
# Broadcast reshape
output = np.array([[1, 2, 3],
[4, 5, 6]])
reshape = np.array([[1, 2],
[3, 4],
[5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert 1 == model._prepare_output_single_model(np.array([1]), (1, 2))
assert 2 == model._prepare_output_single_model(np.asanyarray(2), (3, 4))
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as first input, but output is broadcast to larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]])
y = np.array([[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30]])
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
    jwst and gwcs both require that single-entry vectors produce
    single-entry output vectors, not scalars. This tests for that
    behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_array_equal(output, [0.9500411305585278])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore: Using a non-tuple')
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
    data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
'bounds_error': False,
'fill_value': np.nan,
'method': 'nearest',
}
transform = models.Tabular2D(points, data, **kwargs)
truth = np.array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]) * u.m / u.s
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=False) * u.pix
value = transform(*points)
assert (value == truth).all()
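def test_sparse_meshgrid_shapes_sketch():
    """
    Illustrative sketch (added for clarity; not part of the original
    suite): sparse meshgrids return broadcastable 1-D slices instead of
    full 2-D arrays, which is the input form exercised above.
    """
    sx, sy = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=True)
    assert sx.shape == (3, 1)
    assert sy.shape == (1, 3)
    assert np.broadcast(sx, sy).shape == (3, 3)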
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
with pytest.raises(u.UnitsError):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(ValueError, match=r"input_units keys.*do not match model inputs"):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(ValueError, match=r"Cannot specify input_units for model with existing input units"):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(ValueError, match=r"return_units keys.*do not match model outputs"):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(ValueError, match=r"return_units length does not match n_outputs"):
model.coerce_units(return_units=(u.m, u.s))
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
with pytest.raises(NotImplementedError):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = 'name'
sop = 'value'
    key = _add_special_operator(sop_name, sop)
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
print(model)
true_out = "Model: CompoundModel\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"Expression: convolve_fft (([0]), ([1]))\n" +\
"Components: \n" +\
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., x_0=0., y_0=0., ellip=0., theta=0.)>\n" +\
"\n" +\
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., x_stddev=1., y_stddev=1., theta=0.)>\n" +\
"Parameters:\n" +\
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n" +\
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n" +\
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0\n"
out, err = capsys.readouterr()
assert err == ''
assert out == true_out
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array([[1, 2, 3],
[4, 5, 6]])
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 2, True)
assert str(err.value) == \
"For model_set_axis=2, all inputs must be at least 3-dimensional."
# Fail number of models (has argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 1, True)
assert str(err.value) == \
"Input argument 'x' does not have the correct dimensions in model_set_axis=1 " +\
"for a model set with n_models=2."
# Fail number of models (no argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, [], 1, True)
assert str(err.value) == \
"Input argument '0' does not have the correct dimensions in model_set_axis=1 " +\
"for a model set with n_models=2."
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True) as mkCheck:
assert mkCheck.return_value == \
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == \
[mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)]
# Fail check_broadcast
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True, return_value=None) as mkCheck:
with pytest.raises(ValueError) as err:
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert str(err.value) == \
"All inputs must have identical shapes or must be scalars."
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == \
[mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
with pytest.raises(NotImplementedError):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(model, {(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[('y', False)])
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
# Get using argument not with_bbox
assert model.get_bounding_box(True) == bbox
# Get using with_bbox not argument
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(model, {(1,): (-1, 0), (2,): (0, 1)},
selector_args=[('x', False)])
bbox2 = CompoundBoundingBox.validate(model, {(-0.5,): (-1, 0), (0.5,): (0, 1)},
selector_args=[('x', False)])
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-2, 2)
assert model.bounding_box['y'] == (-1, 1)
bind_bounding_box(model, bbox, order='F')
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-1, 1)
assert model.bounding_box['y'] == (-2, 2)
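def test_bind_bounding_box_order_sketch():
    """
    Illustrative sketch (added for clarity; not part of the original
    suite): with the default ``order='C'`` the interval tuple is read in
    reverse input order (..., y, x), while ``order='F'`` follows the
    input order (x, y, ...), as exercised above.
    """
    model = models.Polynomial2D(1)
    bind_bounding_box(model, ((-1, 1), (-2, 2)))
    assert model.bounding_box['x'] == (-2, 2)
    bind_bounding_box(model, ((-1, 1), (-2, 2)), order='F')
    assert model.bounding_box['x'] == (-1, 1)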
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
This demonstrates how to bind multiple bounding_boxes which are
    selectable using ``with_bounding_box``; note that there must be a
    fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
with pytest.raises(AttributeError):
bind_compound_bounding_box(model, bbox, 'x')
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [('x', False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
    # `with_bounding_box` supplies the selector explicitly, since `-0.5` is not a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
    # `with_bounding_box` supplies the selector explicitly, since `0.5` is not a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
with pytest.raises(RuntimeError):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox, selector_args=(('y', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=(('x', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=((0, True),))
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=(('x0', True), ('x1', True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=((0, True), (1, True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
    assert model_copy.get_bounding_box() is None and model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
    assert model_copy.get_bounding_box() is None and model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
    assert model_copy.get_bounding_box() is None and model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
    assert model_copy.get_bounding_box() is None and model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('x', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
    assert model_copy.get_bounding_box() is None and model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
    assert model_copy.get_bounding_box() is None and model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
    assert model_copy.get_bounding_box() is None and model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
    assert model_copy.get_bounding_box() is None and model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = 'x_mean' # user-defined attribute
assert hasattr(model, 'xname')
assert model.xname == 'x_mean'
model_copy = model.copy()
model_copy.xname
assert hasattr(model_copy, 'xname')
assert model_copy.xname == 'x_mean'
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order='F')
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
# Everything works when its all in the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order='F')
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
# Everything works when its all in the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possiblity of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=['y'])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=['y'])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bind_compound_bounding_box(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
assert model.bounding_box == cbbox
|
474c3405257c47647e8ad2989c04b42a1a0396444c6ea99f7ca9a005e012acc0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for spline models and fitters"""
# pylint: disable=invalid-name
from astropy.utils.exceptions import AstropyUserWarning
import pytest
import unittest.mock as mk
import numpy as np
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import (AstropyUserWarning,)
from astropy.modeling.core import (FittableModel, ModelDefinitionError)
from astropy.modeling.spline import (_Spline, Spline1D, _SplineFitter)
from astropy.modeling.fitting import (SplineInterpolateFitter, SplineSmoothingFitter,
SplineExactKnotsFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
npts = 50
nknots = 10
np.random.seed(42)
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
wieght_tests = [None, test_w]
smoothing_tests = [None, 0.01]
class TestSpline:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {'test': 'test'}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, 'degree')
# Call _init_spline
with mk.patch.object(_Spline, '_init_spline',
autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == \
[mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
with pytest.raises(ValueError) as err:
self.Spline(coeffs=mk.MagicMock())
assert str(err.value) == \
"If one passes a coeffs vector one needs to also pass knots!"
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple([mk.MagicMock() for _ in range(3)])
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple([mk.MagicMock() for _ in range(3)])
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg('test') == '_test'
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(ValueError,
match=r"Optional argument .* already exists in this class!"):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(RuntimeError,
match=r".* has already been set, something has gone wrong!"):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
# No options passed in and No options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
# No options passed in and Options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, "_intercept_optional_inputs",
autospec=True, return_value=new_kwargs) as mkIntercept:
with mk.patch.object(FittableModel, "__call__",
autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == \
[mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == \
[mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, 'test')
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, 'fixed_test', True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, '_create_parameter',
autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple([f"test_param{idx}" for idx in range(20)])
assert mkCreate.call_args_list == \
[mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)]
def test__init_parameters(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_parameters()
assert str(err.value) == \
"This needs to be implemented"
def test__init_data(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(_Spline, "_init_parameters",
autospec=True) as mkParameters:
with mk.patch.object(_Spline, "_init_data",
autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, 'parameters')
main.attach_mock(mkData, 'data')
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl)
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
@pytest.mark.skipif('not HAS_SCIPY')
class TestSpline1D:
def setup_class(self):
def func(x, noise=0):
return np.exp(-x**2) + 0.1*noise
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
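    def test_interpolation_sketch(self):
        """
        Illustrative sketch (added for clarity; not part of the original
        suite): an interpolating spline fit passes through the data it
        was fit to, up to floating-point error.
        """
        fitter = SplineInterpolateFitter()
        spl = fitter(Spline1D(), self.x, self.truth)
        assert_allclose(spl(self.x), self.truth)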
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
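    def test_knot_padding_sketch(self):
        # Editor's sketch (not part of the original suite): when `knots` is
        # an integer n, the knot vector is padded with (degree + 1) boundary
        # knots on each side, so len(t) == n + 2 * (degree + 1). The pairs
        # below mirror the counts asserted elsewhere in this class.
        for n, k in [(10, 3), (15, 2)]:
            spl = Spline1D(knots=n, degree=k)
            assert len(spl.t) == n + 2 * (k + 1)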
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) ==\
f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) ==\
"Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
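        # with no knots set, t falls back to (degree + 1) copies of each
        # default bound (0 and 1)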
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) ==\
"tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == \
"Must have at least 8 knots."
# Error
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) ==\
"Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == \
[mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == \
[mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == \
"Cannot evaluate a derivative of order higher than 4"
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info['resid'])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
with pytest.raises(RuntimeError) as err:
fitter(spl, self.x, self.y, weights=w)
assert str(err.value) ==\
"No knots have been provided"
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, s=s, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, t=knots, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# With no knots present
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
def generate_spline(self, w=None, bbox=(None, None), k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import splrep, BSpline
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
k=k, s=s, t=t)
return BSpline(*tck)
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
for nu in range(4, 9):
with pytest.raises(ValueError) as err:
spl.derivative(nu=nu)
assert str(err.value) == \
"Must have nu <= 3"
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
# Too many antiderivatives
for nu in range(3, 9):
with pytest.raises(ValueError) as err:
spl.antiderivative(nu=nu)
assert str(err.value) == \
f"Supported splines can have max degree 5, antiderivative degree will be {nu + 3}"
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
with pytest.raises(ValueError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"1D model can only have 2 data points."
with pytest.raises(ModelDefinitionError) as err:
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"Only spline models are compatible with this fitter."
with pytest.raises(NotImplementedError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"This has not been implemented for _SplineFitter."
|
25e72c604c7c28afbb71afd4b5c65f8ab1f762e5f16fc96bab06b56a8cde83db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""
# pylint: disable=invalid-name
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling import custom_model, models
from astropy.modeling.models import Mapping
from astropy.modeling.separable import (_coord_matrix, is_separable, _cdot,
_cstack, _arith_oper, separability_matrix)
from astropy.modeling.core import ModelDefinitionError
sh1 = models.Shift(1, name='shift1')
sh2 = models.Shift(2, name='sh2')
scl1 = models.Scale(1, name='scl1')
scl2 = models.Scale(2, name='scl2')
map1 = Mapping((0, 1, 0, 1), name='map1')
map2 = Mapping((0, 0, 1), name='map2')
map3 = Mapping((0, 0), name='map3')
rot = models.Rotation2D(2, name='rotation')
p2 = models.Polynomial2D(1, name='p2')
p22 = models.Polynomial2D(2, name='p22')
p1 = models.Polynomial1D(1, name='p1')
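# Each entry pairs a compound model with its expected
# (is_separable(model), separability_matrix(model)) results.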
compound_models = {
'cm1': (map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm2': (sh1 & sh2 | rot | map1 | p2 & p22,
(np.array([False, False]),
np.array([[True, True], [True, True]]))
),
'cm3': (map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm4': (sh1 & sh2 | map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm5': (map3 | sh1 & sh2 | scl1 & scl2,
(np.array([False, False]),
np.array([[True], [True]]))
),
'cm7': (map2 | p2 & sh1,
(np.array([False, True]),
np.array([[True, False], [False, True]]))
)
}
def test_coord_matrix():
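    # _coord_matrix(model, pos, n_rows) returns an (n_rows, n_inputs) matrix
    # flagging which inputs feed the model's outputs, anchored at the top
    # ('left') or bottom ('right') of the combined frame.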
c = _coord_matrix(p2, 'left', 2)
assert_allclose(np.array([[1, 1], [0, 0]]), c)
c = _coord_matrix(p2, 'right', 2)
assert_allclose(np.array([[0, 0], [1, 1]]), c)
c = _coord_matrix(p1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(p1, 'left', 1)
assert_allclose(np.array([[1]]), c)
c = _coord_matrix(sh1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(sh1, 'right', 2)
assert_allclose(np.array([[0], [1]]), c)
c = _coord_matrix(sh1, 'right', 3)
assert_allclose(np.array([[0], [0], [1]]), c)
c = _coord_matrix(map3, 'left', 2)
assert_allclose(np.array([[1], [1]]), c)
c = _coord_matrix(map3, 'left', 3)
assert_allclose(np.array([[1], [1], [0]]), c)
def test_cdot():
result = _cdot(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _cdot(rot, p2)
assert_allclose(result, np.array([[2, 2]]))
result = _cdot(rot, rot)
assert_allclose(result, np.array([[2, 2], [2, 2]]))
result = _cdot(Mapping((0, 0)), rot)
assert_allclose(result, np.array([[2], [2]]))
with pytest.raises(ModelDefinitionError,
match=r"Models cannot be combined with the \"|\" operator; .*"):
_cdot(sh1, map1)
def test_cstack():
result = _cstack(sh1, scl1)
assert_allclose(result, np.array([[1, 0], [0, 1]]))
result = _cstack(sh1, rot)
assert_allclose(result,
np.array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
)
result = _cstack(rot, sh1)
assert_allclose(result,
np.array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])
)
def test_arith_oper():
# Models as inputs
result = _arith_oper(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _arith_oper(rot, rot)
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# ndarray
result = _arith_oper(np.array([[1, 2], [3, 4]]), np.array([[1, 2], [3, 4]]))
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# Error
with pytest.raises(ModelDefinitionError, match=r"Unsupported operands for arithmetic operator: .*"):
_arith_oper(sh1, map1)
@pytest.mark.parametrize(('compound_model', 'result'), compound_models.values())
def test_separable(compound_model, result):
assert_allclose(is_separable(compound_model), result[0])
assert_allclose(separability_matrix(compound_model), result[1])
def test_custom_model_separable():
@custom_model
def model_a(x):
return x
assert model_a().separable
@custom_model
def model_c(x, y):
return x + y
assert not model_c().separable
assert np.all(separability_matrix(model_c()) == [True, True])
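def test_separability_matrix_sketch():
    # Editor's sketch (not part of the original suite): for "rot & sh1" the
    # rotation couples its two outputs to its two inputs while the shift
    # stays independent, matching the 'cm3'/'cm4' expectations above.
    assert np.all(separability_matrix(rot & sh1) ==
                  np.array([[True, True, False],
                            [True, True, False],
                            [False, False, True]]))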
|
0375f8bedab67b916b0fac0194ff0b539484e7011462d3da5d2971053bccce8f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import itertools
import functools
import pytest
import numpy as np
import unittest.mock as mk
from astropy.modeling import models, fitting
from astropy.modeling.core import Model, FittableModel
from astropy.modeling.parameters import Parameter, InputParameterError, _tofloat, param_repr_oneline
from astropy import units as u
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
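# setter2 takes (value, model), so Parameter can only wrap it once a model
# instance is available (see test__create_value_wrapper below).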
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc)**2 + (y - yc)**2
def do_something(self, v):
pass
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
with pytest.raises(InputParameterError) as err:
_tofloat('test')
assert str(err.value) == \
"Parameter of <class 'str'> could not be converted to float"
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
message = "Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError) as err:
_tofloat(True)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
_tofloat(False)
assert str(err.value) == message
# other
class Value:
pass
with pytest.raises(InputParameterError) as err:
_tofloat(Value)
assert str(err.value) == \
"Don't know how to convert parameter of <class 'type'> to float"
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter('alpha', default=1)
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter('alpha', default=42)
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.)
m1b = Parameter(default=5.)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.)
class M3(M2):
m3d = Parameter(default=20.)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.
assert mod.m1b == 5.
assert mod.m2c == 11.
assert mod.m3d == 20.
for key in ['m1a', 'm1b', 'm2c', 'm3d']:
assert key in mod.__dict__
assert mod.param_names == ('m1a', 'm1b', 'm2c', 'm3d')
def test_param_metric():
mod = M3()
assert mod._param_metrics['m1a']['slice'] == slice(0, 1)
assert mod._param_metrics['m1b']['slice'] == slice(1, 2)
assert mod._param_metrics['m2c']['slice'] == slice(2, 3)
assert mod._param_metrics['m3d']['slice'] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1., 5., 11., 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters
Read an iraf database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
Create also a gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array([4826.1066602783685, 952.8943813407858, 12.641236013982386,
-1.7910672553339604, 0.90252884366711317]),
rtol=10 ** (-2))
def testPolynomial1D(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
message = "bounds may not be specified simultaneously with min or max" +\
" when instantiating Parameter test"
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), max=2, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, max=2, name='test')
assert str(err.value) == message
# Setters
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
with pytest.raises(TypeError) as err:
param.bounds = ('test', None)
assert str(err.value) == \
"Min value must be a number or a Quantity"
with pytest.raises(TypeError) as err:
param.bounds = (None, 'test')
assert str(err.value) == \
"Max value must be a number or a Quantity"
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name='test', default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
with pytest.raises(InputParameterError) as err:
param[slice(0, 0)] = 2
assert str(err.value) == \
"Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError) as err:
param[3] = np.array([5])
assert str(err.value) == \
"Input dimension 3 invalid for 'test' parameter with dimension 1"
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.m)
assert str(err.value) == \
"Cannot attach units to parameters that were not initially specified with units"
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# No force Error (existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.K)
assert str(err.value) == \
"Cannot change the unit attribute directly, instead change the parameter to a new quantity"
def test_quantity(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name='test', default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"cannot reshape array of size 4 into shape (5,)"
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name='test', default=1)
assert param.shape == ()
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"Cannot assign this shape to a scalar quantity"
param.shape = (1,)
# single value
param = Parameter(name='test', default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"Cannot assign this shape to a scalar quantity"
param.shape = ()
def test_size(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name='test', default=[1])
assert param.size == 1
param = Parameter(name='test', default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.std == None == param._std
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.fixed == False == param._fixed
# Set error
with pytest.raises(ValueError) as err:
param.fixed = 3
assert str(err.value) == \
"Value must be boolean"
# Set
param.fixed = True
assert param.fixed == True == param._fixed
def test_tied(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.tied == False == param._tied
# Set error
with pytest.raises(TypeError) as err:
param.tied = mk.NonCallableMagicMock()
assert str(err.value) == \
"Tied must be a callable or set to False or None"
# Set None
param.tied = None
assert param.tied == None == param._tied
# Set False
param.tied = False
assert param.tied == False == param._tied
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
with pytest.raises(ValueError) as err:
param.validator(mk.NonCallableMagicMock())
assert str(err.value) == \
"This decorator method expects a callable.\n" +\
"The use of this method as a direct validator is\n" +\
"deprecated; use the new validate method instead\n"
def test_validate(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
def test_model(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.model == None == param._model
assert param._model_required == False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter0, getter0]) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0)
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Bad ufunc
with pytest.raises(TypeError) as err:
param._create_value_wrapper(np.add, mk.MagicMock())
assert str(err.value) == \
"A numpy.ufunc used for Parameter getter/setter may only take one input argument"
# Good ufunc
assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
# wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required == False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required == True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, 'partial', autospec=True) as mkPartial:
assert param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
with pytest.raises(TypeError) as err:
param._create_value_wrapper(wrapper3, mk.MagicMock())
assert str(err.value) == \
"Parameter getter/setter must be a function of either one or two arguments"
def test_bool(self):
# single value is true
param = Parameter(name='test', default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name='test', default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name='test', default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name='test', default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name='test', default=1)
assert param_repr_oneline(param) == '1.'
# Vector value no units
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param_repr_oneline(param) == '[1., 2., 3., 4.]'
# Single value units
param = Parameter(name='test', default=1*u.m)
assert param_repr_oneline(param) == '1. m'
# Vector value units
param = Parameter(name='test', default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == '[1., 2., 3., 4.] m'
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array([[1.,
10],
[3.5,
5.2],
[0.4,
0.7]]))
assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to a single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array([[11.,
10],
[3.5,
5.2],
[0.4,
0.7]]))
assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
This suite of tests checks most if not all cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
            TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
        assert t.coeff.shape == (2, 3, 2)  # full array shape, including the model_set_axis
        assert t.e.shape == (3, 2)  # full array shape, including the model_set_axis
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
            TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
            TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
                      model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Tests that a model with 3 parameters that do not all mutually broadcast
    raises an error, regardless of the order in which the parameters are
    given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
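# A hedged sketch of the broadcast check above (the helper name is
# illustrative, and np.broadcast_shapes assumes numpy >= 1.20): b has shape
# (2, 3) and c has shape (2, 4), which fail to broadcast in either order,
# so InputParameterError is expected regardless of parameter order.
def _shapes_mutually_broadcast(*shapes):
    try:
        np.broadcast_shapes(*shapes)
    except ValueError:
        return False
    return True
# _shapes_mutually_broadcast((2, 3), (2, 4)) -> False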
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
for x, y in pars:
np.testing.assert_almost_equal(
model(x, y),
(x + 1)**2 + (y - np.pi * 3)**2)
|
9ef195ce05922cf9fa0dd95dc308df91eff9712ac675251fde9e31872c05d826 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for physical functions."""
# pylint: disable=no-member, invalid-name
import pytest
import numpy as np
from astropy.modeling.physical_models import BlackBody, NFW
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from astropy import cosmology
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
__doctest_skip__ = ["*"]
# BlackBody tests
@pytest.mark.parametrize("temperature", (3000 * u.K, 2726.85 * u.deg_C))
def test_blackbody_evaluate(temperature):
b = BlackBody(temperature=temperature, scale=1.0)
assert_quantity_allclose(b(1.4 * u.micron), 486787299458.15656 * u.MJy / u.sr)
assert_quantity_allclose(b(214.13747 * u.THz), 486787299458.15656 * u.MJy / u.sr)
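def _planck_sanity_sketch():
    # A hedged cross-check, not part of the suite: the value asserted above
    # should follow from the Planck law,
    # B_nu = 2 h nu**3 / c**2 / (exp(h nu / (k_B T)) - 1),
    # with a dimensionless scale of 1.
    from astropy import constants as const
    nu = (1.4 * u.micron).to(u.Hz, equivalencies=u.spectral())
    bnu = (2 * const.h * nu ** 3 / const.c ** 2
           / np.expm1(const.h * nu / (const.k_B * (3000 * u.K)))) / u.sr
    assert_quantity_allclose(bnu.to(u.MJy / u.sr),
                             486787299458.15656 * u.MJy / u.sr, rtol=1e-5)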
def test_blackbody_wien_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.lambda_max, 9.890006672986939 * u.micron)
assert_quantity_allclose(b.nu_max, 17.22525080856469 * u.THz)
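def _wien_sanity_sketch():
    # A hedged cross-check, not part of the suite: the expected peaks above
    # follow from the Wien displacement constants (approximate CODATA 2018
    # values assumed here), lambda_max = b / T and nu_max = b' * T.
    T = 293.0 * u.K
    b_lam = 2.897771955e-3 * u.m * u.K
    b_nu = 5.878925757e10 * u.Hz / u.K
    assert_quantity_allclose((b_lam / T).to(u.micron),
                             9.890006672986939 * u.micron, rtol=1e-6)
    assert_quantity_allclose((b_nu * T).to(u.THz),
                             17.22525080856469 * u.THz, rtol=1e-6)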
def test_blackbody_stefan_boltzmann_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.bolometric_flux, 133.02471751812573 * u.W / (u.m * u.m))
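def _stefan_boltzmann_sanity_sketch():
    # A hedged cross-check, not part of the suite: for a dimensionless scale
    # the bolometric flux is sigma * T**4 / pi, since the surface brightness
    # is per steradian; sigma is the approximate CODATA 2018 value.
    sigma = 5.670374419e-8 * u.W / (u.m ** 2 * u.K ** 4)
    assert_quantity_allclose(sigma * (293.0 * u.K) ** 4 / np.pi,
                             133.02471751812573 * u.W / (u.m * u.m), rtol=1e-6)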
def test_blackbody_return_units():
# return of evaluate has no units when temperature has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert not isinstance(b.evaluate(1.0 * u.micron, 1000.0, 1.0), u.Quantity)
# return has "standard" units when scale has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
# return has scale units when scale has units
b = BlackBody(1000.0 * u.K, scale=1.0 * u.MJy / u.sr)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.MJy / u.sr
    # scale parameter has units, but the scale passed to evaluate does not
assert_quantity_allclose(b.evaluate(1.0 * u.micron, 1000.0 * u.K, 4.0), 89668184.86321202 * u.MJy / u.sr)
@pytest.mark.skipif("not HAS_SCIPY")
def test_blackbody_fit():
fitter = LevMarLSQFitter()
b = BlackBody(3000 * u.K, scale=5e-17 * u.Jy / u.sr)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
b_fit = fitter(b, wav, fnu, maxiter=1000)
assert_quantity_allclose(b_fit.temperature, 2840.7438355865065 * u.K)
assert_quantity_allclose(b_fit.scale, 5.803783292762381e-17 * u.Jy / u.sr)
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r'Input contains invalid wavelength/frequency value\(s\)'):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(ValueError) as exc:
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
assert exc.value.args[0] == "Temperature should be positive: [-100.] K"
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(0 * u.AA)
assert len(w) == 3 # 2 of these are RuntimeWarning from zero divide
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(-1.0 * u.AA)
assert len(w) == 1
    # Test that a scale unit not convertible to surface brightness raises an error
with pytest.raises(ValueError) as exc:
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy)
bb(1.0 * u.micron)
assert exc.value.args[0] == "scale units not surface brightness: Jy"
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
multibb = BlackBody([100, 200, 300] * u.K)
flux = multibb(1.2 * u.mm)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
)
flux = multibb([2, 4, 6] * u.mm)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
)
multibb = BlackBody(np.ones(4) * u.K)
flux = multibb(np.ones((3, 4)) * u.mm)
assert flux.shape == (3, 4)
@pytest.mark.parametrize("mass", (2.0000000000000E15 * u.M_sun, 3.976819741e+45 * u.kg))
def test_NFW_evaluate(mass):
"""Evaluation, density, and radii validation of NFW model."""
# Test parameters
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
# Parsec tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))
# kpc tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3141 * u.kpc), (3254.373619264334 * (u.solMass / u.kpc ** 3),
6.471028627484543e+33 * (u.kg / u.kpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3141 * u.kpc), (3184.0370866188623 * (u.solMass / u.kpc**3),
6.33117077170161e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3141 * u.kpc), (3201.1946851294997 * (u.solMass / u.kpc**3),
6.365287109937637e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))
# Meter tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(4.2e+23 * u.m), (1.527649658673012e-57 * (u.solMass / u.m ** 3),
3.0375936602739256e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200c.r_s, (7.616880211930209e+21 * u.m))
assert_quantity_allclose(n200c.r_virial, (6.474348180140678e+22 * u.m))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(4.2e+23 * u.m), (1.5194778058079436e-57 * (u.solMass / u.m ** 3),
3.0213446673751314e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200m.r_s, (8.76521436235054e+21 * u.m))
assert_quantity_allclose(n200m.r_virial, (7.450432207997959e+22 * u.m))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(4.2e+23 * u.m), (1.5214899184117633e-57 * (u.solMass / u.m ** 3),
3.0253455719375224e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(nvir.r_s, (8.481607714647913e+21 * u.m))
assert_quantity_allclose(nvir.r_virial, (7.209366557450727e+22 * u.m))
# Verify string input of overdensity type
# 200c Overdensity
massfactor = "200c"
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# 200m Overdensity
massfactor = "200m"
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
# Virial mass
massfactor = "virial"
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
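def _nfw_density_sketch(r):
    # A hedged sketch, not part of the suite, of the profile the evaluations
    # above follow: rho(r) = rho_s / ((r / r_s) * (1 + r / r_s)**2), using
    # the 200c scale values asserted above. At r = 3 Mpc this reproduces
    # ~3.7097e+12 solMass / Mpc**3.
    rho_s = 7800150779863018.0 * u.solMass / u.Mpc ** 3
    r_s = 0.24684627641195428 * u.Mpc
    x = r / r_s
    return rho_s / (x * (1 + x) ** 2)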
@pytest.mark.skipif("not HAS_SCIPY")
def test_NFW_fit():
"""Test linear fitting of NFW model."""
# Fixed parameters
redshift = 0.63
cosmo = cosmology.Planck15
# Radial set
r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# 200c Overdensity
massfactor = ("critical", 200)
density_r = np.array([1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
7.39336808e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
n200c = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200c.redshift.fixed = True
n_fit = fitter(n200c, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# 200m Overdensity
massfactor = ("mean", 200)
density_r = np.array([1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
7.34674416e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
n200m = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200m.redshift.fixed = True
n_fit = fitter(n200m, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# Virial mass
massfactor = ("virial", 200)
density_r = np.array([1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
7.35821787e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
nvir = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
nvir.redshift.fixed = True
n_fit = fitter(nvir, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
def test_NFW_circular_velocity():
"""Test circular velocity and radial validation of NFW model."""
# Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
r_r = np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5]) * u.Mpc
# 200c Overdensity tests
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))
# 200m Overdensity tests
massfactor = ("mean", 200)
mass = 1.0e14 * u.M_sun
concentration = 12.3
redshift = 1.5
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200m = np.array([670.18236647, 1088.9843324, 1046.82334367, 1016.88890732, 987.97273478,
936.00207134, 891.80115232, 806.63307977, 744.91002191, 659.33401039,
557.82823549, 395.9735786, 318.29863006]) * (u.km / u.s)
assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))
# Virial Overdensity tests
massfactor = ("virial")
mass = 1.2e+45 * u.kg
concentration = 2.4
redshift = 0.34
r_r = np.array([3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
3.54852922e+23]) * u.m
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_vir = np.array([205.87461783, 604.65091823, 793.9190629, 857.52516521, 908.90280843,
986.53582718, 1041.69089845, 1124.19719446, 1164.58270747, 1191.33193561,
1174.02934755, 1023.69360527, 895.52206321]) * (u.km / u.s)
assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))
def test_NFW_exceptions_and_warnings_and_misc():
"""Test NFW exceptions."""
# Arbitrary Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
massfactor = ("critical", 200)
r_r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# Massfactor exception tests
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=("not", "virial"))
assert exc.value.args[0] == "Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor="not virial")
assert exc.value.args[0] == "Massfactor not virial string not of the form '#m', '#c', " \
"or 'virial'"
with pytest.raises(TypeError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=200)
assert exc.value.args[0] == "Massfactor 200 not a tuple or string"
# Verify unitless mass
# Density test
n200c = NFW(mass=mass.value, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3000.0), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# Circular velocity test with unitless mass
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
    # test with unitless input radius
assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)
# Test Default Cosmology
ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)
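def _a_nfw_sketch(c):
    # A hedged sketch of the helper checked above:
    # A_NFW(c) = ln(1 + c) - c / (1 + c); at c = 8.5 this gives
    # ~1.3565549565, matching the assertion.
    return np.log(1 + c) - c / (1 + c)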
|
1b1e9eadd0bf2e8e3f1aed4697463ebf4ae3d142a89417c27ee995f59c80e4f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
from math import cos, sin
import pytest
import unittest.mock as mk
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling import models
from astropy.modeling import rotations
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(1e-5, 1e-4), (40, -20.56), (21.5, 45.9),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
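def _rotate2d_by_hand(x, y, angle_deg):
    # A hedged sketch of the standard counterclockwise rotation Rotation2D
    # applies: x' = x cos(a) - y sin(a), y' = x sin(a) + y cos(a); at 90 deg
    # this takes (1, 0) to (0, 1), as asserted above.
    a = np.deg2rad(angle_deg)
    return x * np.cos(a) - y * np.sin(a), x * np.sin(a) + y * np.cos(a)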
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_Rotation2D_errors():
model = models.Rotation2D(angle=90*u.deg)
# Bad evaluation input shapes
x = np.array([1, 2])
y = np.array([1, 2, 3])
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(x, y, model.angle)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(y, x, model.angle)
assert str(err.value) == message
# Bad evaluation units
x = np.array([1, 2])
y = np.array([1, 2])
message = "x and y must have compatible units"
with pytest.raises(u.UnitsError) as err:
model.evaluate(x * u.m, y, model.angle)
assert str(err.value) == message
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
    The rotation matrix definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
mat = rotations._create_matrix([phi, theta, psi], axes_order)
    assert_allclose(mat.T, matrices[axes_order])
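def _zxz_by_hand(phi, theta, psi):
    # A hedged sketch of the composition the table above encodes: an Euler
    # sequence is a product of elemental rotations, Rz(phi) @ Rx(theta) @
    # Rz(psi) for 'zxz'. Expanding this product reproduces the 'zxz' entry
    # above, which the test compares against mat.T.
    def rz(a):
        return np.array([[np.cos(a), -np.sin(a), 0],
                         [np.sin(a), np.cos(a), 0],
                         [0, 0, 1]])
    def rx(a):
        return np.array([[1, 0, 0],
                         [0, np.cos(a), -np.sin(a)],
                         [0, np.sin(a), np.cos(a)]])
    return rz(phi) @ rx(theta) @ rz(psi)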
def test_rotation_3d():
"""
A sanity test - when V2_REF = 0 and V3_REF = 0,
for V2, V3 close to the origin
    ROLL_REF should be approximately PA_V3.
(Test taken from JWST SIAF report.)
"""
def _roll_angle_from_matrix(matrix, v2, v3):
X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * \
np.sin(v3) + matrix[2, 2] * np.cos(v3)
Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + \
(matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]) * np.sin(v2)
new_roll = np.rad2deg(np.arctan2(Y, X))
if new_roll < 0:
new_roll += 360
return new_roll
# reference points on sky and in a coordinate frame associated
# with the telescope
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = 0
v3_ref = 0
pa_v3 = 37 # in deg
    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg is ~0.01 arcsec
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg is ~0.01 arcsec
angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
axes = "zyxyz"
M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
roll_angle = _roll_angle_from_matrix(M, v2, v3)
assert_allclose(roll_angle, pa_v3, atol=1e-3)
def test_spherical_rotation():
"""
Test taken from JWST INS report - converts
JWST telescope (V2, V3) coordinates to RA, DEC.
"""
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = -503.654472 / 3600 # in deg
v3_ref = -318.742464 / 3600 # in deg
r0 = 37 # in deg
v2 = 210 # in deg
v3 = -75 # in deg
expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg
angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
axes = "zyxyz"
v2s = rotations.RotationSequence3D(angles, axes_order=axes)
x, y, z = rotations.spherical2cartesian(v2, v3)
x1, y1, z1 = v2s(x, y, z)
radec = rotations.cartesian2spherical(x1, y1, z1)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
radec = v2s(v2, v3)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
#assert_allclose(v2s.inverse(*v2s(v2, v3)), (v2, v3))
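def _spherical2cartesian_by_hand(lon_deg, lat_deg):
    # A hedged sketch of the unit-sphere convention assumed for
    # rotations.spherical2cartesian above: degrees in, a point on the unit
    # sphere out.
    lon, lat = np.deg2rad(lon_deg), np.deg2rad(lat_deg)
    return (np.cos(lat) * np.cos(lon),
            np.cos(lat) * np.sin(lon),
            np.sin(lat))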
def test_RotationSequence3D_errors():
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.RotationSequence3D(mk.MagicMock(), axes_order="abc")
# Bad number of angles
with pytest.raises(ValueError) as err:
rotations.RotationSequence3D([1, 2, 3, 4], axes_order="zyx")
assert str(err.value) ==\
"The number of angles 4 should match the number of axes 3."
# Bad evaluation input shapes
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2, 3]),
np.array([1, 2]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2, 3]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2]),
np.array([1, 2, 3]),
[1, 2, 3])
assert str(err.value) == message
def test_RotationSequence3D_inverse():
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
assert_allclose(model.inverse.angles.value, [-3, -2, -1])
assert model.inverse.axes_order == "xyz"
def test_EulerAngleRotation_errors():
# Bad length of axes_order
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="xyzx")
assert str(err.value) ==\
"Expected axes_order to be a character sequence of length 3, got xyzx"
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="abc")
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1 * u.m, 2, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2 * u.m, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2, 3 * u.m,
axes_order="xyz")
assert str(err.value) == message
def test_EulerAngleRotation_inverse():
model = rotations.EulerAngleRotation(1, 2, 3, "xyz")
assert_allclose(model.inverse.phi, -3)
assert_allclose(model.inverse.theta, -2)
assert_allclose(model.inverse.psi, -1)
assert model.inverse.axes_order == "zyx"
def test__SkyRotation_errors():
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1 * u.m, 2, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2 * u.m, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2, 3 * u.m)
assert str(err.value) == message
def test__SkyRotation__evaluate():
model = rotations._SkyRotation(1, 2, 3)
phi = mk.MagicMock()
theta = mk.MagicMock()
lon = mk.MagicMock()
lat = mk.MagicMock()
lon_pole = mk.MagicMock()
alpha = 5
delta = mk.MagicMock()
with mk.patch.object(rotations._EulerRotation, 'evaluate',
autospec=True, return_value=(alpha, delta)) as mkEval:
assert (365, delta) == model._evaluate(phi, theta, lon, lat, lon_pole)
assert mkEval.call_args_list ==\
[mk.call(model, phi, theta, lon, lat, lon_pole, 'zxz')]
|
2e5ac5e24a828fb6586088c8afc51dbf1e8f6803f8839dfa547f00b673e2ef87 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import math_functions
x = np.linspace(-20, 360, 100)
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
def test_math():
for name in math_functions.__all__:
model_class = getattr(math_functions, name)
assert model_class.__module__ == 'astropy.modeling.math_functions'
model = model_class()
func = getattr(np, model.func.__name__)
if model.n_inputs == 1:
assert_allclose(model(x), func(x))
elif model.n_inputs == 2:
assert_allclose(model(x, x), func(x, x))
assert math_functions.ModUfunc is math_functions.RemainderUfunc
assert math_functions.DivideUfunc is math_functions.True_divideUfunc
|
2dbb0d55941d8f06647679ccf57096fffdbff93d233e404ad12314f54ddcc55c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import types
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.random import default_rng
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
from astropy.modeling import fitting
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
def test_fixed_par(self):
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
def test_tied_par(self):
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
from scipy import optimize
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_no_constraints(self):
from scipy import optimize
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0,
416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0])
self.data = data.reshape(11, 11)
def test_bounds_lsq(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(AstropyUserWarning, match='consider using linear fitting methods'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_gauss2d_lsq(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match='The fit may be unsuccessful'):
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*The fit may be unsuccessful.*',
category=AstropyUserWarning)
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_fixed_and_bound_constraints():
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
_ = f(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_bound_constraints_estimate_jacobian():
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
    # This fit should be trivial, so even without bounds constraints it
    # should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
f2 = fitting.LevMarLSQFitter()
_ = f2(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
assert np.any(f2.fit_info['fjac'] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian2d_positive_stddev():
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
def test_2d_model():
from astropy.utils import NumpyRNGContext
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
fitter = fitting.LevMarLSQFitter()
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, .1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, .2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (0.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds['stddev_0'] = (4, 5)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (4, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds['stddev'] = (1, 5)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
|
9798a7b6e3d5468828a6353a6f02267a6f3f7fc713cb6cb917b377b424262e19 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Shift, Rotation2D, Gaussian1D, Identity, Mapping, UnitsMapping
from astropy import units as u
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert mapping(1, 2) == (2.0, 1.0)
assert mapping.inverse(2, 1) == (1, 2)
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
assert mapping(1, 2) == (1.0, 2., 1., 2)
assert mapping.inverse(1, 2, 1, 2) == (1, 2)
assert mapping.inverse.n_inputs == 4
assert mapping.inverse.n_outputs == 2
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
    assert mapping(1, 2) == 1.
def test_drop_axes_2():
mapping = Mapping((1, ))
    assert mapping(1, 2) == 2.
with pytest.raises(NotImplementedError):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert mapping.n_inputs == 2
rotation = Rotation2D(60)
model = rotation | mapping
assert_allclose(model(1, 2), 1.86602540378)
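# Illustrative sketch (helper, not collected by pytest): Mapping can also
# duplicate an input, so one value feeds both arguments of a downstream
# two-input model. Only the public astropy.modeling API is assumed.
def _mapping_fanout_demo():
    dup = Mapping((0, 0))          # one input -> two identical outputs
    model = dup | Rotation2D(90)   # rotate the point (x, x) by 90 degrees
    assert_allclose(model(1), (-1.0, 1.0), atol=1e-12)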
@pytest.mark.parametrize('name', [None, 'test_name'])
def test_bad_inputs(name):
mapping = Mapping((1, 0), name=name)
if name is None:
name = "Mapping"
x = [np.ones((2, 3))*idx for idx in range(5)]
for idx in range(1, 6):
if idx == 2:
continue
with pytest.raises(TypeError) as err:
mapping.evaluate(*x[:idx])
assert str(err.value) == \
f"{name} expects 2 inputs; got {idx}"
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose((res_x, res_y),
(np.array([[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081]]),
np.array([[1., 1., 1.],
[1., 1., 1.]])))
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10)
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif('not HAS_SCIPY')
def test_fittable_compound():
m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0., dy, x.shape)
y_noisy = y_real + n
pfit = LevMarLSQFitter()
new_model = pfit(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
def test_identity_repr():
m = Identity(1, name='foo')
assert repr(m) == "<Identity(1, name='foo')>"
m = Identity(1)
assert repr(m) == "<Identity(1)>"
def test_mapping_repr():
m = Mapping([0, 1], name='foo')
assert repr(m) == "<Mapping([0, 1], name='foo')>"
m = Mapping([0, 1])
assert repr(m) == "<Mapping([0, 1])>"
class TestUnitsMapping:
def test___init__(self):
# Set values
model = UnitsMapping(((u.m, None),),
input_units_equivalencies='test_equiv',
input_units_allow_dimensionless=True,
name='test')
assert model._mapping == ((u.m, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies == 'test_equiv'
assert model.input_units_allow_dimensionless == {'x': True}
assert model.name == 'test'
assert model._input_units == {'x': u.m}
# Default values
model = UnitsMapping(((u.K, None),))
assert model._mapping == ((u.K, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies is None
assert model.input_units_allow_dimensionless == {'x': False}
assert model.name is None
assert model._input_units == {'x': u.K}
# Error
with pytest.raises(ValueError) as err:
UnitsMapping(((u.m, None), (u.m, u.K)))
assert str(err.value) == \
"If one return unit is None, then all must be None"
def test_evaluate(self):
model = UnitsMapping(((u.m, None),))
assert model(10*u.m) == 10
model = UnitsMapping(((u.m, u.K),))
assert model(10*u.m) == 10 * u.K
model = UnitsMapping(((u.m, None), (u.K, None)),)
assert model(10*u.m, 20*u.K) == (10, 20)
model = UnitsMapping(((u.m, u.K), (u.K, u.m)),)
assert model(10*u.m, 20*u.K) == (10*u.K, 20*u.m)
def test_repr(self):
model = UnitsMapping(((u.m, None),), name='foo')
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),), name='foo')>"
model = UnitsMapping(((u.m, None),))
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),))>"
|
9baccb42396f4c7925c29db38799ef84e051aeff597df312dbf62c5d76bd26db | # Various tests of models not related to evaluation, fitting, or parameters
# pylint: disable=invalid-name, no-member
import warnings
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling.models import Mapping, Pix2Sky_TAN, Gaussian1D
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
bbox = g.bounding_box.bounding_box()
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2. * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
Test that models constructed with Quantities must be called with quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
with pytest.raises(u.UnitsError):
g(10)
def test_no_quantity_call():
"""
Test that models not constructed with Quantities can be called without quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10*u.m)
def test_uses_quantity():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
allmodels = []
for name in dir(models):
model = getattr(models, name)
if type(model) is _ModelMeta:
try:
m = model()
except Exception:
continue  # skip models that cannot be instantiated with defaults
allmodels.append(m)
return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
input_units
return_units
input_units_allow_dimensionless
input_units_strict
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
b8e6041495c266b2a1310602178306baf1c747435ecdf4d12c14398c8642cafa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import pytest
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.modeling.core import fix_inputs
from astropy.modeling.functional_models import (
Gaussian1D, Sersic1D,
Sine1D, Cosine1D, Tangent1D, ArcSine1D, ArcCosine1D, ArcTangent1D,
Linear1D, Lorentz1D, Voigt1D, Const1D,
Box1D, Trapezoid1D, RickerWavelet1D,
Moffat1D, Gaussian2D, Const2D, Ellipse2D,
Disk2D, Ring2D, Box2D, TrapezoidDisk2D,
RickerWavelet2D, AiryDisk2D, Moffat2D, Sersic2D,
KingProjectedAnalytic1D,
Scale, Multiply,
Planar2D, Logarithmic1D, Exponential1D)
from astropy.modeling.physical_models import Plummer1D, Drude1D
from astropy.modeling.powerlaws import (
PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D, LogParabola1D)
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.parameters import InputParameterError
FUNC_MODELS_1D = [
{'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m},
{'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Cosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Tangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': [-4, 0] / u.Hz},
{'class': ArcSine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s},
{'class': ArcCosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -1 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s},
{'class': ArcTangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': False},
{'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False},
{'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm},
{'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.0621795524 * u.Jy)],
'bounding_box': False},
{'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False},
{'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um},
{'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um},
{'class': RickerWavelet1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um},
{'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False},
{'class': KingProjectedAnalytic1D,
'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},
'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],
'bounding_box': [0. * u.pc, 2. * u.pc]},
{'class': Logarithmic1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 3.4657359027997265 * u.m)],
'bounding_box': False},
{'class': Exponential1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 36.945280494653254 * u.m)],
'bounding_box': False}
]
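# Illustrative sketch of how one entry above is consumed (this mirrors the
# logic of the test functions further down; helper, not collected by pytest):
def _check_one_entry(entry=FUNC_MODELS_1D[0]):
    m = entry['class'](**entry['parameters'])
    for args in entry['evaluation']:
        # all but the last element are inputs; the last is the expected output
        assert_quantity_allclose(m(*args[:-1]), args[-1])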
SCALE_MODELS = [
{'class': Scale,
'parameters': {'factor': 2*u.m},
'evaluation': [(1*u.m, 2*u.m)],
'bounding_box': False},
{'class': Multiply,
'parameters': {'factor': 2*u.m},
'evaluation': [(1 * u.m/u.m, 2*u.m)],
'bounding_box': False},
]
PHYS_MODELS_1D = [
{'class': Plummer1D,
'parameters': {'mass': 3 * u.kg, 'r_plum': 0.5 * u.m},
'evaluation': [(1 * u.m, 0.10249381 * u.kg / u.m**3)],
'bounding_box': False},
{'class': Drude1D,
'parameters': {'amplitude': 1.0 * u.m, 'x_0': 2175. * u.AA, 'fwhm': 400. * u.AA},
'evaluation': [(2000*u.AA, 0.5452317018423869 * u.m)],
'bounding_box': [-17825, 22175] * u.AA},
]
FUNC_MODELS_2D = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Planar2D,
'parameters': {'slope_x': 2*u.m, 'slope_y': 3*u.m, 'intercept': 4*u.m},
'evaluation': [(5*u.m/u.m, 6*u.m/u.m, 32*u.m)],
'bounding_box': False},
]
POWERLAW_MODELS = [
{'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False},
{'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False},
{'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
'bounding_box': False},
{'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False},
{'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False}
]
POLY_MODELS = [
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2, 'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False},
]
MODELS = FUNC_MODELS_1D + SCALE_MODELS + FUNC_MODELS_2D + POWERLAW_MODELS +\
PHYS_MODELS_1D + POLY_MODELS
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model['class'] == Drude1D:
params['x_0'][-1] = 0 * u.AA
with pytest.raises(InputParameterError) as err:
model['class'](**params)
assert str(err.value) ==\
'0 is not an allowed value for x_0'
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
# In the following we need to explicitly test that the value is False
# since Quantities no longer evaluate as True
if model['bounding_box'] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model['bounding_box'])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.parametrize('model', MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model['class'](**model['parameters'])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()['x1'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x0':1})
assert fixed_input_model.inputs_map()['x1'][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', MODELS)
def test_models_fitting(model):
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
fitter = LevMarLSQFitter()
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
# If the parameter did not have a unit before but was assigned one by
# the fit (e.g. radians for an angle), then we should allow that
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
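# Illustrative sketch (helper, not collected by pytest): a direct
# quantity-aware fit without the MODELS machinery above. Assumes scipy is
# available, since LevMarLSQFitter wraps scipy's least-squares optimizer.
def _fit_gaussian_with_units_demo():
    x = np.linspace(0, 4, 50) * u.m
    y = Gaussian1D(amplitude=3 * u.Jy, mean=2 * u.m, stddev=0.3 * u.m)(x)
    m_init = Gaussian1D(amplitude=1 * u.Jy, mean=1.8 * u.m, stddev=0.5 * u.m)
    m_fit = LevMarLSQFitter()(m_init, x, y)
    # the fitted parameters come back with units equivalent to the originals
    assert m_fit.mean.unit.is_equivalent(u.m)
    return m_fit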
unit_mismatch_models = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.K, 3 * u.Jy),
(4 * u.K, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy/u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
@pytest.mark.parametrize('model', unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
message = "Units of 'x' and 'y' inputs should match"
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
with pytest.raises(u.UnitsError) as err:
m.without_units_for_data(**kwargs)
assert str(err.value) == message
|
3357fb8e43ce751b987e23b80b80c966cce1803739d7eaba1a876edee00f6108 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
dpath = os.path.split(os.path.abspath(__file__))[0]
|
2cd424901c69f6c38a53689e184cca1fd1088d2f9c7c5f7620472608d2559179 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.samp.errors import SAMPHubError, SAMPClientError, SAMPProxyError
# By default, tests should not use the internet.
from astropy.samp import conf
def setup_module(module):
conf.use_internet = False
def test_SAMPHubError():
"""Test that SAMPHubError can be instantiated"""
SAMPHubError("test")
def test_SAMPClientError():
"""Test that SAMPClientError can be instantiated"""
SAMPClientError("test")
def test_SAMPProxyError():
"""Test that SAMPProxyError can be instantiated"""
SAMPProxyError("test", "any")
|
80fefe96c387a22187d99f605372ac0ad1742a85c91b4b8a30a7e6e342a53755 | import ssl
import tempfile
import pytest
from astropy.utils.data import get_pkg_data_filename
from astropy.samp.hub import SAMPHubServer
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.errors import SAMPProxyError
# By default, tests should not use the internet.
from astropy.samp import conf
from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY
def setup_module(module):
conf.use_internet = False
class TestStandardProfile:
@property
def hub_init_kwargs(self):
return {}
@property
def client_init_kwargs(self):
return {}
@property
def client_connect_kwargs(self):
return {}
def setup_method(self, method):
self.tmpdir = tempfile.mkdtemp()
self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1,
**self.hub_init_kwargs)
self.hub.start()
self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
def test_main(self):
self.client1_id = self.client1.get_public_id()
self.client2_id = self.client2.get_public_id()
self.metadata1 = {"samp.name": "Client 1",
"samp.description.text": "Client 1 Description",
"client.version": "1.1"}
self.metadata2 = {"samp.name": "Client 2",
"samp.description.text": "Client 2 Description",
"client.version": "1.2"}
# Check that the clients are connected
assert self.client1.is_connected
assert self.client2.is_connected
# Check that ping works
self.client1.ping()
self.client2.ping()
# Check that get_registered_clients works as expected.
assert self.client1_id not in self.client1.get_registered_clients()
assert self.client2_id in self.client1.get_registered_clients()
assert self.client1_id in self.client2.get_registered_clients()
assert self.client2_id not in self.client2.get_registered_clients()
# Check that get_metadata works as expected
assert self.client1.get_metadata(self.client1_id) == {}
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client1_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client1.declare_metadata(self.metadata1)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client2.declare_metadata(self.metadata2)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == self.metadata2
assert self.client2.get_metadata(self.client2_id) == self.metadata2
# Check that, without subscriptions, sending a notification from one
# client to another raises an error.
message = {}
message['samp.mtype'] = "table.load.votable"
message['samp.params'] = {}
with pytest.raises(SAMPProxyError):
self.client1.notify(self.client2_id, message)
# Check that there are no currently active subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
# We now test notifications and calls
rec1 = Receiver(self.client1)
rec2 = Receiver(self.client2)
self.client2.bind_receive_notification('table.load.votable',
rec2.receive_notification)
self.client2.bind_receive_call('table.load.votable',
rec2.receive_call)
self.client1.bind_receive_response('test-tag', rec1.receive_response)
# Check resulting subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id)
assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id)
# Once we have finished with the calls and notifications, we will
# check the data got across correctly.
# Test notify
params = random_params(self.tmpdir)
self.client1.notify(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify(self.client2.get_public_id(),
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test notify_all
params = random_params(self.tmpdir)
self.client1.notify_all({'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify_all("table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call
params = random_params(self.tmpdir)
self.client1.call(self.client2.get_public_id(), 'test-tag',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall(self.client2.get_public_id(), 'test-tag',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_all
params = random_params(self.tmpdir)
self.client1.call_all('tag1',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall_all('tag2',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_and_wait
params = random_params(self.tmpdir)
result = self.client1.call_and_wait(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params}, timeout=5)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
result = self.client1.ecall_and_wait(self.client2.get_public_id(),
"table.load.votable", timeout=5, **params)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# TODO: check that receive_response received the right data
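# Illustrative sketch (helper, not collected by pytest): the minimal notify
# round trip that test_main exercises, without the rest of the bookkeeping.
# Only names already imported in this module are used.
def _minimal_notify_demo(tmpdir):
    hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
    hub.start()
    sender = SAMPIntegratedClient()
    receiver = SAMPIntegratedClient()
    try:
        sender.connect(hub=hub, pool_size=1)
        receiver.connect(hub=hub, pool_size=1)
        rec = Receiver(receiver)
        receiver.bind_receive_notification('table.load.votable',
                                           rec.receive_notification)
        params = random_params(tmpdir)
        sender.notify(receiver.get_public_id(),
                      {'samp.mtype': 'table.load.votable',
                       'samp.params': params})
        assert_output('table.load.votable', receiver.get_private_key(),
                      sender.get_public_id(), params, timeout=60)
    finally:
        if sender.is_connected:
            sender.disconnect()
        if receiver.is_connected:
            receiver.disconnect()
        hub.stop()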
|
72ce5e2de14d3775b8bb6ca4bbc1991e707e48454a3dcf7f2483b0481a4b52e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.client import SAMPClient
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.hub import SAMPHubServer
# By default, tests should not use the internet.
from astropy.samp import conf
def setup_module(module):
conf.use_internet = False
def test_SAMPHubProxy():
"""Test that SAMPHubProxy can be instantiated"""
SAMPHubProxy()
def test_SAMPClient():
"""Test that SAMPClient can be instantiated"""
proxy = SAMPHubProxy()
SAMPClient(proxy)
def test_SAMPIntegratedClient():
"""Test that SAMPIntegratedClient can be instantiated"""
SAMPIntegratedClient()
@pytest.fixture
def samp_hub(request):
"""A fixture that can be used by client tests that require a HUB."""
my_hub = SAMPHubServer()
my_hub.start()
request.addfinalizer(my_hub.stop)
def test_reconnect(samp_hub):
"""Test that SAMPIntegratedClient can reconnect.
This is a regression test for bug [#2673]
https://github.com/astropy/astropy/issues/2673
"""
my_client = SAMPIntegratedClient()
my_client.connect()
my_client.disconnect()
my_client.connect()
|
4e64b6bbb8b1ad4b5fa924418a3527786da11197ae741ea7d43ce09609ab7269 | import sys
from astropy.samp.hub_script import hub_script
from astropy.samp import conf
def setup_module(module):
conf.use_internet = False
def setup_function(function):
function.sys_argv_orig = sys.argv
sys.argv = ["samp_hub"]
def teardown_function(function):
sys.argv = function.sys_argv_orig
def test_hub_script():
sys.argv.append('-m') # run in multiple mode
sys.argv.append('-w') # disable web profile
hub_script(timeout=3)
|
b87e5cbee34f8270a5cf05cab4660dc44e82b74a542b20595df2be1f94ed50d9 | from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.hub import SAMPHubServer
from astropy.samp import conf
def setup_module(module):
conf.use_internet = False
class TestHubProxy:
def setup_method(self, method):
self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
self.hub.start()
self.proxy = SAMPHubProxy()
self.proxy.connect(hub=self.hub, pool_size=1)
def teardown_method(self, method):
if self.proxy.is_connected:
self.proxy.disconnect()
self.hub.stop()
def test_is_connected(self):
assert self.proxy.is_connected
def test_disconnect(self):
self.proxy.disconnect()
def test_ping(self):
self.proxy.ping()
def test_registration(self):
result = self.proxy.register(self.proxy.lockfile["samp.secret"])
self.proxy.unregister(result['samp.private-key'])
def test_custom_lockfile(tmpdir):
lockfile = tmpdir.join('.samptest').realpath().strpath
hub = SAMPHubServer(web_profile=False, lockfile=lockfile, pool_size=1)
hub.start()
proxy = SAMPHubProxy()
proxy.connect(hub=hub, pool_size=1)
hub.stop()
|
308426876df5a16c8324dbd0dcf79d14cffb8b139abc3b4328beffa9abe99f0d | """
Test the web profile using Python classes that have been adapted to act like a
web client. We can only put a single test here because only one hub can run
with the web profile active, and the user might want to run the tests in
parallel.
"""
import os
import threading
import tempfile
from urllib.request import Request, urlopen
from astropy.utils.data import get_readable_fileobj
from astropy.samp import SAMPIntegratedClient, SAMPHubServer
from .web_profile_test_helpers import (AlwaysApproveWebProfileDialog,
SAMPIntegratedWebClient)
from astropy.samp.web_profile import CROSS_DOMAIN, CLIENT_ACCESS_POLICY
from astropy.samp import conf
from .test_standard_profile import TestStandardProfile as BaseTestStandardProfile
def setup_module(module):
conf.use_internet = False
class TestWebProfile(BaseTestStandardProfile):
def setup_method(self, method):
self.dialog = AlwaysApproveWebProfileDialog()
t = threading.Thread(target=self.dialog.poll)
t.start()
self.tmpdir = tempfile.mkdtemp()
lockfile = os.path.join(self.tmpdir, '.samp')
self.hub = SAMPHubServer(web_profile_dialog=self.dialog,
lockfile=lockfile,
web_port=0, pool_size=1)
self.hub.start()
self.client1 = SAMPIntegratedClient()
self.client1.connect(hub=self.hub, pool_size=1)
self.client1_id = self.client1.get_public_id()
self.client1_key = self.client1.get_private_key()
self.client2 = SAMPIntegratedWebClient()
self.client2.connect(web_port=self.hub._web_port, pool_size=2)
self.client2_id = self.client2.get_public_id()
self.client2_key = self.client2.get_private_key()
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
self.dialog.stop()
# The full communication tests are run since TestWebProfile inherits
# test_main from TestStandardProfile
def test_web_profile(self):
# Check some additional queries to the server
with get_readable_fileobj(f'http://localhost:{self.hub._web_port}/crossdomain.xml') as f:
assert f.read() == CROSS_DOMAIN
with get_readable_fileobj(f'http://localhost:{self.hub._web_port}/clientaccesspolicy.xml') as f:
assert f.read() == CLIENT_ACCESS_POLICY
# Check headers
req = Request(f'http://localhost:{self.hub._web_port}/crossdomain.xml')
req.add_header('Origin', 'test_web_profile')
resp = urlopen(req)
assert resp.getheader('Access-Control-Allow-Origin') == 'test_web_profile'
assert resp.getheader('Access-Control-Allow-Headers') == 'Content-Type'
assert resp.getheader('Access-Control-Allow-Credentials') == 'true'
|
a1bffc862b5fd18c6a7e4c97a62b131db14a50038bf7459ced43e02fb9ac63e1 | import os
import time
import pickle
import random
import string
from astropy.samp import SAMP_STATUS_OK
TEST_REPLY = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"txt": "test"}}
def write_output(mtype, private_key, sender_id, params):
filename = params['verification_file']
f = open(filename, 'wb')
pickle.dump(mtype, f)
pickle.dump(private_key, f)
pickle.dump(sender_id, f)
pickle.dump(params, f)
f.close()
def assert_output(mtype, private_key, sender_id, params, timeout=None):
filename = params['verification_file']
start = time.time()
while True:
try:
with open(filename, 'rb') as f:
rec_mtype = pickle.load(f)
rec_private_key = pickle.load(f)
rec_sender_id = pickle.load(f)
rec_params = pickle.load(f)
break
except (OSError, EOFError):
if timeout is not None and time.time() - start > timeout:
raise Exception(f"Timeout while waiting for file: {filename}")
assert rec_mtype == mtype
assert rec_private_key == private_key
assert rec_sender_id == sender_id
assert rec_params == params
class Receiver:
def __init__(self, client):
self.client = client
def receive_notification(self, private_key, sender_id, mtype, params, extra):
write_output(mtype, private_key, sender_id, params)
def receive_call(self, private_key, sender_id, msg_id, mtype, params, extra):
# Here we need to make sure that we first reply, *then* write out the
# file, otherwise the tests see the file and move to the next call
# before waiting for the reply to be received.
self.client.reply(msg_id, TEST_REPLY)
self.receive_notification(private_key, sender_id, mtype, params, extra)
def receive_response(self, private_key, sender_id, msg_id, response):
pass
def random_id(length=16):
return ''.join(random.sample(string.ascii_letters + string.digits, length))
def random_params(directory):
return {'verification_file': os.path.join(directory, random_id()),
'parameter1': 'abcde',
'parameter2': 1331}
|
ff435c6d9f55cbbfd6bd2eacf7d06f6e89d13e516ac07d944f4e5c1b5ce5ae8a | import time
import threading
import xmlrpc.client as xmlrpc
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.client import SAMPClient
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
from astropy.samp.errors import SAMPClientError, SAMPHubError
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
try:
self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy,
f'http://127.0.0.1:{web_port}',
allow_none=1)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError("set_xmlrpc_callback is not defined for the "
"web profile")
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : `SAMPWebHubProxy`
An instance of the `SAMPWebHubProxy` class defined in this module, to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None,
callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result['samp.methodName'] == 'receiveNotification':
self.receive_notification(self._private_key,
*result['samp.params'])
elif result['samp.methodName'] == 'receiveCall':
self.receive_call(self._private_key,
*result['samp.params'])
elif result['samp.methodName'] == 'receiveResponse':
self.receive_response(self._private_key,
*result['samp.params'])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError("Registation failed - samp.self-id "
"was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registation failed - samp.private-key "
"was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError("Unable to register to the SAMP Hub. Hub "
"proxy not connected.")
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
# pushCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
A Simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the `SAMPWebClient` and `SAMPWebHubProxy` functionality
(both defined in this module) in a simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None,
callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata,
callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
|
1888a78f9719fb4394cc643a2d41e586c026ae9130ebdcab6b660bbd3797a4e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import time
from astropy.samp.hub import SAMPHubServer
from astropy.samp import conf
def setup_module(module):
conf.use_internet = False
def test_SAMPHubServer():
"""Test that SAMPHub can be instantiated"""
SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
def test_SAMPHubServer_run():
"""Test that SAMPHub can be run"""
hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
def test_SAMPHubServer_run_repeated():
"""
Test that SAMPHub can be restarted after it has been stopped, including
when web profile support is enabled.
"""
hub = SAMPHubServer(web_profile=True, mode='multiple', pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
time.sleep(1)
hub.start()
time.sleep(1)
hub.stop()
|
6c92009ce72a60330bb8291233e1436b7674ef48cb992f6f54ae36520f736e58 |
from .iers import *
|
ffb1e4f6ad7a546bbc0a38c3b84d1fda32be1a45b81b34c22836c9bb1a91e9a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
import re
from datetime import datetime
from warnings import warn
from urllib.parse import urlparse
import numpy as np
import erfa
from astropy.time import Time, TimeDelta
from astropy import config as _config
from astropy import units as u
from astropy.table import QTable, MaskedColumn
from astropy.utils.data import (get_pkg_data_filename, clear_download_cache,
is_url_in_cache, get_readable_fileobj)
from astropy.utils.state import ScienceState
from astropy import utils
from astropy.utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf', 'earth_orientation_table',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning',
'LeapSeconds', 'IERS_LEAP_SECOND_FILE', 'IERS_LEAP_SECOND_URL',
'IETF_LEAP_SECOND_URL']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all' # noqa: E501
IERS_A_URL_MIRROR = 'https://datacenter.iers.org/data/9/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename('data/Leap_Second.dat')
IERS_LEAP_SECOND_URL = 'https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat'
IETF_LEAP_SECOND_URL = 'https://www.ietf.org/timezones/data/leap-seconds.list'
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault('http_headers', {'User-Agent': 'astropy/iers',
'Accept': '*/*'})
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
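# Example (illustrative): because the IERS classes download through this
# wrapper, the IERS download timeout can be tuned independently of the
# general astropy.utils.data timeout:
#
#     from astropy.utils.iers import conf
#     conf.remote_timeout = 60.0   # seconds; applies to IERS downloads only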
def _none_to_float(value):
"""
Convert None to a valid floating point value. Especially
for auto_max_age = None.
"""
return (value if value is not None else np.finfo(float).max)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default (even if the '
'full IERS file with predictions was already downloaded and cached). '
'This parameter also controls whether internet resources will be '
'queried to update the leap second table if the installed version is '
'out of date. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. '
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
'Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR,
'Mirror URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
system_leap_second_file = _config.ConfigItem(
'',
'System file with leap seconds.')
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL,
'URL for auto-downloading leap seconds.')
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL,
'Alternate URL for auto-downloading leap seconds.')
conf = Conf()
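# Example (illustrative): configuration items can be overridden temporarily,
# e.g. to avoid network access during an offline test run:
#
#     from astropy.utils.iers import conf
#     with conf.set_temp('auto_download', False):
#         ...  # code that would otherwise trigger an IERS-A download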
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ASCII file holding IERS data,
passed on to the ``read`` class method (further optional arguments
available for some IERS subclasses can also be given).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
            y component of CIP correction for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
raise IERSRangeError('(some) times are outside of range covered '
'by IERS table.')
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
        # For the typical format a match will always be found (since the MJD
        # values are integers); hence it is important to define which side of
        # the match we take: this ensures self['MJD'][i-1] <= mjd < self['MJD'][i]
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if (getattr(col, 'unit', None) is not None and
isinstance(col, MaskedColumn)):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
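# Illustrative sketch (added for exposition; not part of the original
# module).  Exercises the interpolation API defined above using the bundled
# IERS-B table; ``IERS_B`` is defined further down in this module but is
# only resolved when the function is called.
def _example_ut1_utc_lookup():
    """Sketch: interpolate UT1-UTC at 2000-01-01 00:00 UTC."""
    table = IERS_B.open()                  # memoized after the first call
    jd1, jd2 = 2451544.5, 0.0              # two-part JD for 2000-01-01
    ut1_utc, status = table.ut1_utc(jd1, jd2, return_status=True)
    return ut1_utc, status                 # Quantity in seconds, iers.FROM_IERS_B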
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a['UT1_UTC_A']) &
(iers_a['PolPMFlag_A'] != '')]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table['UT1_UTC_B'])
table['UT1_UTC'] = np.where(b_bad, table['UT1_UTC_A'], table['UT1_UTC_B'])
table['UT1Flag'] = np.where(b_bad, table['UT1Flag_A'], 'B')
# Repeat for polar motions.
b_bad = np.isnan(table['PM_X_B']) | np.isnan(table['PM_Y_B'])
table['PM_x'] = np.where(b_bad, table['PM_x_A'], table['PM_X_B'])
table['PM_y'] = np.where(b_bad, table['PM_y_A'], table['PM_Y_B'])
table['PolPMFlag'] = np.where(b_bad, table['PolPMFlag_A'], 'B')
b_bad = np.isnan(table['dX_2000A_B']) | np.isnan(table['dY_2000A_B'])
table['dX_2000A'] = np.where(b_bad, table['dX_2000A_A'], table['dX_2000A_B'])
table['dY_2000A'] = np.where(b_bad, table['dY_2000A_A'], table['dY_2000A_B'])
table['NutFlag'] = np.where(b_bad, table['NutFlag_A'], 'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
# Since only 'P' and 'I' are possible and 'P' is guaranteed to come
# after 'I', we can use searchsorted for 100 times speed up over
# finding the first index where the flag equals 'P'.
p_index = min(np.searchsorted(table['UT1Flag_A'], 'P'),
np.searchsorted(table['PolPMFlag_A'], 'P'))
table.meta['predictive_index'] = p_index
table.meta['predictive_mjd'] = table['MJD'][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format='cds', readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
    If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format='cds', readme=readme,
data_start=data_start)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') in all_urls:
return cls.iers_table
try:
filename = download_file(all_urls[0], sources=all_urls, cache=True)
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning(
f'failed to download {" and ".join(all_urls)}, '
f'using local IERS-B: {err}'))
cls.iers_table = IERS_B.open()
return cls.iers_table
cls.iers_table = cls.read(file=filename)
cls.iers_table.meta['data_url'] = all_urls[0]
return cls.iers_table
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
        # If auto_max_age is smaller than the IERS update interval then repeated
        # downloads may occur without getting updated values (giving an IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
(now_mjd - predictive_mjd) > auto_max_age):
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(
all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\n'
'A coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'))
return
new_table = self.__class__.read(file=filename)
new_table.meta['data_url'] = str(all_urls[0])
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace current values from the first predictive index through
# the end of the current table. This replacement is much faster than just
# deleting all rows and then using add_row for the whole duration.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][np.isfinite(table['UT1_UTC_B'])]
i0 = np.searchsorted(iers_b['MJD'], mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'], mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table['MJD'][:n_iers_b], iers_b['MJD']):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC']
table['PM_X_B'][:n_iers_b] = iers_b['PM_x']
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y']
table['dX_2000A_B'][:n_iers_b] = iers_b['dX_2000A']
table['dY_2000A_B'][:n_iers_b] = iers_b['dY_2000A']
return table
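# Illustrative sketch (added for exposition; not part of the original
# module).  ``IERS_Auto`` may download or refresh IERS-A data on first use
# unless ``conf.auto_download`` is False, so this can touch the network.
def _example_auto_polar_motion():
    """Sketch: polar motion components for one date via the auto table."""
    table = IERS_Auto.open()
    pm_x, pm_y = table.pm_xy(2459000.5)    # JD 2459000.5 == MJD 59000 (2020-05-31)
    return pm_x, pm_y                      # angle Quantities (arcsec)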
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
Astropy has a built-in ``iers.IERS_LEAP_SECONDS_FILE``. Up to date versions
can be downloaded from ``iers.IERS_LEAP_SECONDS_URL`` or
``iers.LEAP_SECONDS_LIST_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r'^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$')
_expires = None
_auto_open_files = ['erfa',
IERS_LEAP_SECOND_FILE,
'system_leap_second_file',
'iers_leap_second_auto_url',
'ietf_leap_second_auto_url']
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. For the auto-loading,
a list comprised of the table shipped with astropy, and files and
URLs in `~astropy.utils.iers.Conf` are tried, returning the first
that is sufficiently new, or the newest among them all.
"""
if file is None:
return cls.auto_open()
if file.lower() == 'erfa':
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = '{0.year:04d}-{0.month:02d}-{0.day:02d}'.format(datetime.utcnow())
return Time(s, scale='tai', format='iso', out_subfmt='date')
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. We look for a file
that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age`
        days after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format='jd')
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [(f, True) for f in files
if not urlparse(f).netloc or is_url_in_cache(f)]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta['data_url'] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError('none of the files could be read. The '
'following errors were raised:\n' + str(err_list))
if self.expires < self._today() and conf.auto_max_age is not None:
warn('leap-second file is expired.', IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'"""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(f'{year}-{month_nb:02d}-{day}',
scale='tai', out_subfmt='date')
break
else:
raise ValueError(f'did not find expiration date in {file}')
self = cls.read(lines, format='ascii.no_header', **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
"""
return cls._read_leap_seconds(
file, names=['mjd', 'day', 'month', 'year', 'tai_utc'])
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ['ntp_seconds', 'tai_utc', 'comment', 'day', 'month', 'year']
# Note: ntp_seconds does not fit in 32 bit, so causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file, names=names, include_names=names[:2],
converters={'ntp_seconds': [convert_numpy(np.int64)]})
self['mjd'] = (self['ntp_seconds']/86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self['mjd'], format='mjd', scale='tai').isot
ymd = np.array([[int(part) for part in t.partition('T')[0].split('-')]
for t in isot])
self['year'], self['month'], self['day'] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time('{0.year:04d}-{0.month:02d}-{0.day:02d}'
.format(erfa.leap_seconds.expires),
scale='tai')
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
            table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
"""
if initialize_erfa == 'empty':
# Initialize to empty and update is the same as overwrite.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == 'only':
return 0
return erfa.leap_seconds.update(self)
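# Illustrative sketch (added for exposition; not part of the original
# module).  Ties the pieces above together: find the freshest available
# leap-second list and push any new entries into ERFA.  ``auto_open`` may
# query the network when ``conf.auto_download`` is True.
def _example_sync_erfa_leap_seconds():
    """Sketch: refresh ERFA's leap-second table from the best available list."""
    table = LeapSeconds.auto_open()        # first sufficiently-new source wins
    n_updated = table.update_erfa_leap_seconds()
    return table.expires, n_updated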
|
ce96447faa44ea2c193a9761b5d7f2768b6f5b8b5c9b7e5e6286338a2c5be53d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of functions for checking various XML-related strings for
standards compliance.
"""
import re
import urllib.parse
def check_id(ID):
"""
Returns `True` if *ID* is a valid XML ID.
"""
return re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID) is not None
def fix_id(ID):
"""
Given an arbitrary string, create one that can be used as an xml
id. This is rather simplistic at the moment, since it just
replaces non-valid characters with underscores.
"""
if re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID):
return ID
if len(ID):
corrected = ID
if not len(corrected) or re.match('^[^A-Za-z_]$', corrected[0]):
corrected = '_' + corrected
corrected = (re.sub(r"[^A-Za-z_]", '_', corrected[0]) +
re.sub(r"[^A-Za-z0-9_\.\-]", "_", corrected[1:]))
return corrected
return ''
_token_regex = r"(?![\r\n\t ])[^\r\n\t]*(?![\r\n\t ])"
def check_token(token):
"""
Returns `True` if *token* is a valid XML token, as defined by XML
Schema Part 2.
"""
return (token == '' or
re.match(
r"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
is not None)
def check_mime_content_type(content_type):
"""
Returns `True` if *content_type* is a valid MIME content type
(syntactically at least), as defined by RFC 2045.
"""
ctrls = ''.join(chr(x) for x in range(0, 0x20))
token_regex = f'[^()<>@,;:\\\"/[\\]?= {ctrls}\x7f]+'
return re.match(
fr'(?P<type>{token_regex})/(?P<subtype>{token_regex})$',
content_type) is not None
def check_anyuri(uri):
"""
Returns `True` if *uri* is a valid URI as defined in RFC 2396.
"""
if (re.match(
(r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}[0-9a-zA-Z;" +
r"/?:@&=+$\.\-_!~*'()%]+)?(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?"),
uri) is None):
return False
try:
urllib.parse.urlparse(uri)
except Exception:
return False
return True
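# Illustrative sketch (added for exposition; not part of the original
# module).  The expected values below follow directly from the regular
# expressions defined above.
def _example_checks():
    assert check_id('m31_field')           # letters/underscore may lead an ID
    assert not check_id('31m')             # digits may not
    assert fix_id('31m') == '_31m'         # a leading underscore is prepended
    assert check_mime_content_type('text/xml')
    assert check_anyuri('https://www.ivoa.net/xml/VOTable/v1.3')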
|
03573e228055f873656d6754c8510ae63314dc79a633073ef073289104b5efc3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""URL unescaper functions."""
# STDLIB
from xml.sax import saxutils
__all__ = ['unescape_all']
# This is DIY
_bytes_entities = {b'&amp;': b'&', b'&lt;': b'<', b'&gt;': b'>',
                   b'&amp;&amp;': b'&&', b'&&': b'&', b'%2F': b'/'}
_bytes_keys = [b'&amp;&amp;', b'&&', b'&amp;', b'&lt;', b'&gt;', b'%2F']
# This is used by saxutils
_str_entities = {'&amp;&amp;': '&&', '&&': '&', '%2F': '/'}
_str_keys = ['&amp;&amp;', '&&', '&amp;', '&lt;', '&gt;', '%2F']
def unescape_all(url):
"""Recursively unescape a given URL.
    .. note:: '&amp;&amp;' becomes a single '&'.
Parameters
----------
url : str or bytes
URL to unescape.
Returns
-------
clean_url : str or bytes
Unescaped URL.
"""
if isinstance(url, bytes):
func2use = _unescape_bytes
keys2use = _bytes_keys
else:
func2use = _unescape_str
keys2use = _str_keys
clean_url = func2use(url)
not_done = [clean_url.count(key) > 0 for key in keys2use]
if True in not_done:
return unescape_all(clean_url)
else:
return clean_url
def _unescape_str(url):
return saxutils.unescape(url, _str_entities)
def _unescape_bytes(url):
clean_url = url
for key in _bytes_keys:
clean_url = clean_url.replace(key, _bytes_entities[key])
return clean_url
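# Illustrative sketch (added for exposition; not part of the original
# module).  Unescaping is applied repeatedly until no known entity remains,
# so doubly-escaped input collapses in a single call.
def _example_unescape():
    assert unescape_all('a%2Fb?x=1&amp;y=2') == 'a/b?x=1&y=2'
    assert unescape_all(b'p&amp;amp;q') == b'p&q'   # &amp;amp; -> &amp; -> &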
|
e06bf12a6b9bbd578d1bab08bf7b790a313cff8c7d09fe368c05fa861f17c344 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class that makes it simple to stream out well-formed and
nicely-indented XML.
"""
# STDLIB
import contextlib
import textwrap
try:
from . import _iterparser
except ImportError:
def xml_escape_cdata(s):
"""
Escapes &, < and > in an XML CDATA string.
"""
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def xml_escape(s):
"""
Escapes &, ', ", < and > in an XML attribute value.
"""
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace("\"", """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
else:
xml_escape_cdata = _iterparser.escape_xml_cdata
xml_escape = _iterparser.escape_xml
class XMLWriter:
"""
A class to write well-formed and nicely indented XML.
Use like this::
w = XMLWriter(fh)
with w.tag('html'):
with w.tag('body'):
w.data('This is the content')
Which produces::
<html>
<body>
This is the content
</body>
</html>
"""
def __init__(self, file):
"""
Parameters
----------
file : writable file-like
"""
self.write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self._open = 0 # true if start tag is open
self._tags = []
self._data = []
self._indentation = " " * 64
self.xml_escape_cdata = xml_escape_cdata
self.xml_escape = xml_escape
def _flush(self, indent=True, wrap=False):
"""
Flush internal buffers.
"""
if self._open:
if indent:
self.write(">\n")
else:
self.write(">")
self._open = 0
if self._data:
data = ''.join(self._data)
if wrap:
indent = self.get_indentation_spaces(1)
data = textwrap.fill(
data,
initial_indent=indent,
subsequent_indent=indent)
self.write('\n')
self.write(self.xml_escape_cdata(data))
self.write('\n')
self.write(self.get_indentation_spaces())
else:
self.write(self.xml_escape_cdata(data))
self._data = []
def start(self, tag, attrib={}, **extra):
"""
Opens a new element. Attributes can be given as keyword
arguments, or as a string/string dictionary. The method
returns an opaque identifier that can be passed to the
:meth:`close` method, to close all open elements up to and
including this one.
Parameters
----------
tag : str
The element name
attrib : dict of str -> str
Attribute dictionary. Alternatively, attributes can
be given as keyword arguments.
Returns
-------
id : int
Returns an element identifier.
"""
self._flush()
# This is just busy work -- we know our tag names are clean
# tag = xml_escape_cdata(tag)
self._data = []
self._tags.append(tag)
self.write(self.get_indentation_spaces(-1))
self.write(f"<{tag}")
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(attrib.items())
attrib.sort()
for k, v in attrib:
if v is not None:
# This is just busy work -- we know our keys are clean
# k = xml_escape_cdata(k)
v = self.xml_escape(v)
self.write(f" {k}=\"{v}\"")
self._open = 1
return len(self._tags)
@contextlib.contextmanager
def xml_cleaning_method(self, method='escape_xml', **clean_kwargs):
"""Context manager to control how XML data tags are cleaned (escaped) to
remove potentially unsafe characters or constructs.
        The default (``method='escape_xml'``) applies brute-force escaping of
        certain key XML characters like ``<``, ``>``, and ``&`` so that data
        written through the writer cannot inject live XML markup into the output.
In order to explicitly allow certain XML tags (e.g. link reference or
emphasis tags), use ``method='bleach_clean'``. This sanitizes the data
string using the ``clean`` function of the
`bleach <https://bleach.readthedocs.io/en/latest/clean.html>`_ package.
Any additional keyword arguments will be passed directly to the
``clean`` function.
Finally, use ``method='none'`` to disable any sanitization. This should
be used sparingly.
Example::
w = writer.XMLWriter(ListWriter(lines))
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<a href="https://google.com">google.com</a>')
w.end()
Parameters
----------
method : str
Cleaning method. Allowed values are "escape_xml",
"bleach_clean", and "none".
**clean_kwargs : keyword args
Additional keyword args that are passed to the
bleach.clean() function.
"""
current_xml_escape_cdata = self.xml_escape_cdata
if method == 'bleach_clean':
# NOTE: bleach is imported locally to avoid importing it when
            # it is not necessary
try:
import bleach
except ImportError:
raise ValueError('bleach package is required when HTML escaping is disabled.\n'
'Use "pip install bleach".')
if clean_kwargs is None:
clean_kwargs = {}
self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs)
elif method == "none":
self.xml_escape_cdata = lambda x: x
elif method != 'escape_xml':
raise ValueError('allowed values of method are "escape_xml", "bleach_clean", and "none"')
yield
self.xml_escape_cdata = current_xml_escape_cdata
@contextlib.contextmanager
def tag(self, tag, attrib={}, **extra):
"""
A convenience method for creating wrapper elements using the
``with`` statement.
Examples
--------
>>> with writer.tag('foo'): # doctest: +SKIP
... writer.element('bar')
... # </foo> is implicitly closed here
...
Parameters are the same as to `start`.
"""
self.start(tag, attrib, **extra)
yield
self.end(tag)
def comment(self, comment):
"""
Adds a comment to the output stream.
Parameters
----------
comment : str
Comment text, as a Unicode string.
"""
self._flush()
self.write(self.get_indentation_spaces())
self.write(f"<!-- {self.xml_escape_cdata(comment)} -->\n")
def data(self, text):
"""
Adds character data to the output stream.
Parameters
----------
text : str
Character data, as a Unicode string.
"""
self._data.append(text)
def end(self, tag=None, indent=True, wrap=False):
"""
Closes the current element (opened by the most recent call to
`start`).
Parameters
----------
tag : str
Element name. If given, the tag must match the start tag.
If omitted, the current element is closed.
"""
if tag:
if not self._tags:
raise ValueError(f"unbalanced end({tag})")
if tag != self._tags[-1]:
raise ValueError(f"expected end({self._tags[-1]}), got {tag}")
else:
if not self._tags:
raise ValueError("unbalanced end()")
tag = self._tags.pop()
if self._data:
self._flush(indent, wrap)
elif self._open:
self._open = 0
self.write("/>\n")
return
if indent:
self.write(self.get_indentation_spaces())
self.write(f"</{tag}>\n")
def close(self, id):
"""
Closes open elements, up to (and including) the element identified
by the given identifier.
Parameters
----------
id : int
Element identifier, as returned by the `start` method.
"""
while len(self._tags) > id:
self.end()
def element(self, tag, text=None, wrap=False, attrib={}, **extra):
"""
Adds an entire element. This is the same as calling `start`,
`data`, and `end` in sequence. The ``text`` argument
can be omitted.
"""
self.start(tag, attrib, **extra)
if text:
self.data(text)
self.end(indent=False, wrap=wrap)
def flush(self):
pass # replaced by the constructor
def get_indentation(self):
"""
Returns the number of indentation levels the file is currently
in.
"""
return len(self._tags)
def get_indentation_spaces(self, offset=0):
"""
Returns a string of spaces that matches the current
indentation level.
"""
return self._indentation[:len(self._tags) + offset]
@staticmethod
def object_attrs(obj, attrs):
"""
Converts an object with a bunch of attributes on an object
into a dictionary for use by the `XMLWriter`.
Parameters
----------
obj : object
Any Python object
attrs : sequence of str
Attribute names to pull from the object
Returns
-------
attrs : dict
Maps attribute names to the values retrieved from
``obj.attr``. If any of the attributes is `None`, it will
not appear in the output dictionary.
"""
d = {}
for attr in attrs:
if getattr(obj, attr) is not None:
d[attr.replace('_', '-')] = str(getattr(obj, attr))
return d
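# Illustrative sketch (added for exposition; not part of the original
# module).  Mirrors the class docstring: nested ``tag`` contexts produce a
# well-formed document, with indentation handled by the writer.
def _example_writer():
    import io
    buf = io.StringIO()
    w = XMLWriter(buf)
    with w.tag('html'):
        with w.tag('body', attrib={'class': 'main'}):
            w.data('This is the content')
    return buf.getvalue()   # the serialized <html>...</html> document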
|
61d92c23d6be5d110a8fe3ce9f7ebe42cbecdffa94771167ea7147e58856cf46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from collections import defaultdict
from setuptools import Extension
from os.path import join
import sys
from extension_helpers import pkg_config
def get_extensions(build_type='release'):
XML_DIR = 'astropy/utils/xml/src'
cfg = defaultdict(list)
cfg['sources'] = [join(XML_DIR, "iterparse.c")]
if (int(os.environ.get('ASTROPY_USE_SYSTEM_EXPAT', 0)) or
int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))):
for k, v in pkg_config(['expat'], ['expat']).items():
cfg[k].extend(v)
else:
EXPAT_DIR = 'cextern/expat/lib'
cfg['sources'].extend([
join(EXPAT_DIR, fn) for fn in
["xmlparse.c", "xmlrole.c", "xmltok.c", "xmltok_impl.c"]])
cfg['include_dirs'].extend([XML_DIR, EXPAT_DIR])
if sys.platform.startswith('linux'):
# This is to ensure we only export the Python entry point
# symbols and the linker won't try to use the system expat in
# place of ours.
cfg['extra_link_args'].extend([
f"-Wl,--version-script={join(XML_DIR, 'iterparse.map')}"
])
cfg['define_macros'].append(("HAVE_EXPAT_CONFIG_H", 1))
if sys.byteorder == 'big':
cfg['define_macros'].append(('BYTEORDER', '4321'))
else:
cfg['define_macros'].append(('BYTEORDER', '1234'))
if sys.platform != 'win32':
cfg['define_macros'].append(('HAVE_UNISTD_H', None))
return [Extension("astropy.utils.xml._iterparser", **cfg)]
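# Illustrative note (added for exposition; not part of the original file):
# the ASTROPY_USE_SYSTEM_EXPAT / ASTROPY_USE_SYSTEM_ALL switches above select
# between the bundled expat sources and a system copy found via pkg-config.
# A hypothetical build invocation would look like:
#
#     ASTROPY_USE_SYSTEM_EXPAT=1 pip install -e .
#
# Either variable only needs to be truthy at build time.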
|
bd6948951f8881061c7c2eaff18500ae669ab75f904a5ea36c3b9df7540b14c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes a fast iterator-based XML parser.
"""
# STDLIB
import contextlib
import io
import sys
# ASTROPY
from astropy.utils import data
__all__ = ['get_xml_iterator', 'get_xml_encoding', 'xml_readlines']
@contextlib.contextmanager
def _convert_to_fd_or_read_function(fd):
"""
Returns a function suitable for streaming input, or a file object.
This function is only useful if passing off to C code where:
- If it's a real file object, we want to use it as a real
C file object to avoid the Python overhead.
- If it's not a real file object, it's much handier to just
have a Python function to call.
This is somewhat quirky behavior, of course, which is why it is
private. For a more useful version of similar behavior, see
`astropy.utils.misc.get_readable_fileobj`.
Parameters
----------
fd : object
May be:
- a file object. If the file is uncompressed, this raw
file object is returned verbatim. Otherwise, the read
method is returned.
- a function that reads from a stream, in which case it is
returned verbatim.
- a file path, in which case it is opened. Again, like a
file object, if it's uncompressed, a raw file object is
returned, otherwise its read method.
- an object with a :meth:`read` method, in which case that
method is returned.
Returns
-------
fd : context-dependent
See above.
"""
if callable(fd):
yield fd
return
with data.get_readable_fileobj(fd, encoding='binary') as new_fd:
if sys.platform.startswith('win'):
yield new_fd.read
else:
if isinstance(new_fd, io.FileIO):
yield new_fd
else:
yield new_fd.read
def _fast_iterparse(fd, buffersize=2 ** 10):
from xml.parsers import expat
if not callable(fd):
read = fd.read
else:
read = fd
queue = []
text = []
def start(name, attr):
queue.append((True, name, attr,
(parser.CurrentLineNumber, parser.CurrentColumnNumber)))
del text[:]
def end(name):
queue.append((False, name, ''.join(text).strip(),
(parser.CurrentLineNumber, parser.CurrentColumnNumber)))
parser = expat.ParserCreate()
parser.specified_attributes = True
parser.StartElementHandler = start
parser.EndElementHandler = end
parser.CharacterDataHandler = text.append
Parse = parser.Parse
data = read(buffersize)
while data:
Parse(data, False)
for elem in queue:
yield elem
del queue[:]
data = read(buffersize)
Parse('', True)
for elem in queue:
yield elem
# Try to import the C version of the iterparser, otherwise fall back
# to the Python implementation above.
_slow_iterparse = _fast_iterparse
try:
from . import _iterparser
_fast_iterparse = _iterparser.IterParser
except ImportError:
pass
@contextlib.contextmanager
def get_xml_iterator(source, _debug_python_based_parser=False):
"""
Returns an iterator over the elements of an XML file.
The iterator doesn't ever build a tree, so it is much more memory
and time efficient than the alternative in ``cElementTree``.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
parts : iterator
The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*):
- *start*: when `True` is a start element event, otherwise
an end element event.
- *tag*: The name of the element
- *data*: Depends on the value of *event*:
- if *start* == `True`, data is a dictionary of
attributes
- if *start* == `False`, data is a string containing
the text content of the element
- *pos*: Tuple (*line*, *col*) indicating the source of the
event.
"""
with _convert_to_fd_or_read_function(source) as fd:
if _debug_python_based_parser:
context = _slow_iterparse(fd)
else:
context = _fast_iterparse(fd)
yield iter(context)
def get_xml_encoding(source):
"""
Determine the encoding of an XML file by reading its header.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
encoding : str
"""
with get_xml_iterator(source) as iterator:
start, tag, data, pos = next(iterator)
if not start or tag != 'xml':
raise OSError('Invalid XML file')
# The XML spec says that no encoding === utf-8
return data.get('encoding') or 'utf-8'
def xml_readlines(source):
"""
Get the lines from a given XML file. Correctly determines the
encoding and always returns unicode.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
lines : list of unicode
"""
encoding = get_xml_encoding(source)
with data.get_readable_fileobj(source, encoding=encoding) as input:
input.seek(0)
xml_lines = input.readlines()
return xml_lines
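# Illustrative sketch (added for exposition; not part of the original
# module).  Feeds an in-memory document through ``get_xml_iterator`` and
# collects the (start, tag, data, pos) event stream described above.
def _example_iterate():
    import io
    doc = io.BytesIO(b'<a x="1"><b>hi</b></a>')
    events = []
    with get_xml_iterator(doc) as parts:
        for start, tag, data, pos in parts:
            events.append((start, tag))
    # e.g. [(True, 'a'), (True, 'b'), (False, 'b'), (False, 'a')]
    return events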
|
680154076c904a77f9c7ef007fbe9e5d1b6dc3d9401a56f82583818f1eff1b43 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation. At the moment, this
makes a subprocess call to xmllint. This could use a Python-based
library at some point in the future, if something appropriate could be
found.
"""
import os
import subprocess
def validate_schema(filename, schema_file):
"""
Validates an XML file against a schema or DTD.
Parameters
----------
filename : str
The path to the XML file to validate
schema_file : str
The path to the XML schema or DTD
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
"""
base, ext = os.path.splitext(schema_file)
if ext == '.xsd':
schema_part = '--schema ' + schema_file
elif ext == '.dtd':
schema_part = '--dtdvalid ' + schema_file
else:
raise TypeError("schema_file must be a path to an XML Schema or DTD")
p = subprocess.Popen(
f"xmllint --noout --nonet {schema_part} {filename}",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 127:
raise OSError(
"xmllint not found, so can not validate schema")
elif p.returncode < 0:
from astropy.utils.misc import signal_number_to_name
raise OSError(
"xmllint was terminated by signal '{}'".format(
signal_number_to_name(-p.returncode)))
return p.returncode, stdout, stderr
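# Illustrative sketch (added for exposition; not part of the original
# module).  The file names here are hypothetical placeholders, and xmllint
# must be available on the PATH for the call to succeed.
def _example_validate(xml_path='votable.xml', xsd_path='VOTable-1.3.xsd'):
    returncode, stdout, stderr = validate_schema(xml_path, xsd_path)
    return returncode == 0   # 0 means the document validated cleanly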
|
34add0ce9d5bc25fed19110d21a7cde794876954ccac49de5434f180e304d3f4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import locale
import os
import urllib.error
from datetime import datetime
import pytest
import numpy as np
from astropy.utils import data, misc
from astropy.io import fits
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
try:
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False,
timeout=5)
objurl = misc.find_api_page(misc, 'dev', False, timeout=5)
except urllib.error.URLError:
if os.environ.get('CI', False):
pytest.xfail('Timed out in CI')
else:
raise
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc' # noqa
# Try a non-dev version
objurl = misc.find_api_page(misc, 'v3.2.1', False, timeout=3)
assert objurl == 'https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc' # noqa
def test_skip_hidden():
path = data.get_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_JsonCustomEncoder_FITS_rec_from_files():
with fits.open(fits.util.get_testdata_filepath('variable_length_table.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
"[[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]"
with fits.open(fits.util.get_testdata_filepath('btable.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
'[[1, "Sirius", -1.4500000476837158, "A1V"], ' \
'[2, "Canopus", -0.7300000190734863, "F0Ib"], ' \
'[3, "Rigil Kent", -0.10000000149011612, "G2V"]]'
with fits.open(fits.util.get_testdata_filepath('table.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
'[["NGC1001", 11.100000381469727], ' \
'["NGC1002", 12.300000190734863], ' \
'["NGC1003", 15.199999809265137]]'
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc._set_locale('en_US.utf8'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc._set_locale('fr_FR.utf8'):
assert date.strftime('%a, %b') == 'dim., oct.'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc._set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5
assert misc.dtype_bytes_or_chars(np.array('12345').dtype) == 5
|
a0b40b1869f3167b7784b01c17a759f5dad213295d8395ba4932c553f10377a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.xml import check, unescaper, writer
from astropy.utils.compat.optional_deps import HAS_BLEACH # noqa
def test_writer():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with w.tag("html"):
with w.tag("body"):
w.data("This is the content")
w.comment("comment")
value = ''.join(fh.getvalue().split())
assert value == '<html><body>Thisisthecontent<!--comment--></body></html>'
def test_check_id():
assert check.check_id("Fof32")
assert check.check_id("_Fof32")
assert not check.check_id("32Fof")
def test_fix_id():
assert check.fix_id("Fof32") == "Fof32"
assert check.fix_id("@#f") == "___f"
def test_check_token():
assert check.check_token("token")
assert not check.check_token("token\rtoken")
def test_check_mime_content_type():
assert check.check_mime_content_type("image/jpeg")
assert not check.check_mime_content_type("image")
def test_check_anyuri():
assert check.check_anyuri("https://github.com/astropy/astropy")
def test_unescape_all():
# str
url_in = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \
'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
# bytes
url_in = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \
b'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
b'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
def test_escape_xml():
s = writer.xml_escape('This & That')
assert type(s) == str
    assert s == 'This &amp; That'
s = writer.xml_escape(1)
assert type(s) == str
assert s == '1'
s = writer.xml_escape(b'This & That')
assert type(s) == bytes
    assert s == b'This &amp; That'
@pytest.mark.skipif('HAS_BLEACH')
def test_escape_xml_without_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with pytest.raises(ValueError) as err:
with w.xml_cleaning_method('bleach_clean'):
pass
assert 'bleach package is required when HTML escaping is disabled' in str(err.value)
@pytest.mark.skipif('not HAS_BLEACH')
def test_escape_xml_with_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Turn off XML escaping, but still sanitize unsafe tags like <script>
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<script>x</script> <em>OK</em>')
w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; <em>OK</em></td>\n'
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Default is True (all XML tags escaped)
with w.xml_cleaning_method():
w.start('td')
w.data('<script>x</script> <em>OK</em>')
w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; &lt;em&gt;OK&lt;/em&gt;</td>\n'
|
56ab50b6f7d2481008ace78ace04fae903c81dbca1e175ce0980c4de2fddfb96 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils.shapes import check_broadcast, unbroadcast
def test_check_broadcast():
assert check_broadcast((10, 1), (3,)) == (10, 3)
assert check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
with pytest.raises(ValueError):
check_broadcast((10, 2), (3,))
with pytest.raises(ValueError):
check_broadcast((10, 1), (3,), (4, 1, 2, 3))
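# A sketch of the same broadcasting rule using plain NumPy (assuming
# NumPy >= 1.20, where np.broadcast_shapes is available).
def test_check_broadcast_numpy_sketch():
    assert np.broadcast_shapes((10, 1), (3,)) == (10, 3)
    assert np.broadcast_shapes((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
    with pytest.raises(ValueError):
        np.broadcast_shapes((10, 2), (3,))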
def test_unbroadcast():
x = np.array([1, 2, 3])
y = np.broadcast_to(x, (2, 4, 3))
z = unbroadcast(y)
assert z.shape == (3,)
np.testing.assert_equal(z, x)
x = np.ones((3, 5))
y = np.broadcast_to(x, (5, 3, 5))
z = unbroadcast(y)
assert z.shape == (3, 5)
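# A minimal sketch of the property unbroadcast exploits (assuming only public
# NumPy behaviour): axes added by np.broadcast_to have stride 0, so they
# reference no extra data and can be sliced away without copying.
def test_unbroadcast_stride_sketch():
    y = np.broadcast_to(np.arange(3), (2, 4, 3))
    assert y.strides[:2] == (0, 0)  # the two broadcast axes carry no data
    z = unbroadcast(y)
    assert z.shape == (3,)
    assert z.base is not None  # a view on the original buffer, not a copy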
|
913b7e0cd5ddcaead3857e1bd644a4e890821c01b4655e0c828042df0a277ce5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Some might be indirectly tested already in ``astropy.io.fits.tests``.
"""
import io
import numpy as np
import pytest
from astropy.utils.diff import diff_values, report_diff_values, where_not_allclose
from astropy.table import Table
@pytest.mark.parametrize('a', [np.nan, np.inf, 1.11, 1, 'a'])
def test_diff_values_false(a):
assert not diff_values(a, a)
@pytest.mark.parametrize(
('a', 'b'),
[(np.inf, np.nan), (1.11, 1.1), (1, 2), (1, 'a'), ('a', 'b')])
def test_diff_values_true(a, b):
assert diff_values(a, b)
def test_float_comparison():
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
"""
f = io.StringIO()
a = np.float32(0.029751372)
b = np.float32(0.029751368)
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
# This test doesn't care about what the exact output is, just that it
# did show a difference in their text representations
assert 'a>' in out
assert 'b>' in out
def test_diff_types():
"""
Regression test for https://github.com/astropy/astropy/issues/4122
"""
f = io.StringIO()
a = 1.0
b = '1.0'
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (" (float) a> 1.0\n"
" (str) b> '1.0'\n"
" ? + +\n")
def test_diff_numeric_scalar_types():
""" Test comparison of different numeric scalar types. """
f = io.StringIO()
assert not report_diff_values(1.0, 1, fileobj=f)
out = f.getvalue()
assert out == ' (float) a> 1.0\n (int) b> 1\n'
def test_array_comparison():
"""
Test diff-ing two arrays.
"""
f = io.StringIO()
a = np.arange(9).reshape(3, 3)
b = a + 1
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' at [0, 0]:\n'
' a> 0\n'
' b> 1\n'
' at [0, 1]:\n'
' a> 1\n'
' b> 2\n'
' at [0, 2]:\n'
' a> 2\n'
' b> 3\n'
' ...and at 6 more indices.\n')
def test_diff_shaped_array_comparison():
"""
Test diff-ing two differently shaped arrays.
"""
f = io.StringIO()
a = np.empty((1, 2, 3))
identical = report_diff_values(a, a[0], fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' Different array shapes:\n'
' a> (1, 2, 3)\n'
' ? ---\n'
' b> (2, 3)\n')
def test_tablediff():
"""
Test diff-ing two simple Table objects.
"""
a = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.0
M82 2012-10-29 16.2 15.2
M101 2012-10-31 15.1 15.5""", format='ascii')
b = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.5
M82 2012-10-29 16.2 15.2
M101 2012-10-30 15.1 15.5
NEW 2018-05-08 nan 9.0""", format='ascii')
f = io.StringIO()
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' name obs_date mag_b mag_v\n'
' ---- ---------- ----- -----\n'
' a> M31 2012-01-02 17.0 16.0\n'
' ? ^\n'
' b> M31 2012-01-02 17.0 16.5\n'
' ? ^\n'
' M82 2012-10-29 16.2 15.2\n'
' a> M101 2012-10-31 15.1 15.5\n'
' ? ^\n'
' b> M101 2012-10-30 15.1 15.5\n'
' ? ^\n'
' b> NEW 2018-05-08 nan 9.0\n')
# Identical
assert report_diff_values(a, a, fileobj=f)
@pytest.mark.parametrize('kwargs', [{}, {'atol': 0, 'rtol': 0}])
def test_where_not_allclose(kwargs):
a = np.array([1, np.nan, np.inf, 4.5])
b = np.array([1, np.inf, np.nan, 4.6])
assert where_not_allclose(a, b, **kwargs) == ([3], )
assert len(where_not_allclose(a, a, **kwargs)[0]) == 0
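# Sketch of the equivalence the default case relies on (assumption: for
# finite inputs, where_not_allclose mirrors np.isclose with the same
# atol/rtol defaults).
def test_where_not_allclose_numpy_sketch():
    a = np.array([1.0, 4.5])
    b = np.array([1.0, 4.6])
    idx = np.where(~np.isclose(a, b))
    assert idx[0].tolist() == [1]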
|
0b4055fdc697472300dfbc5b499c80481cfe2f9bcc5abd5b5f57e1e1e84003c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import secrets
import sys
from textwrap import dedent
import pytest
from astropy.utils.parsing import lex, yacc, _TAB_HEADER
def _docstring_canary():
"""Docstring that's here just to check for -OO."""
@pytest.mark.skipif(not _docstring_canary.__doc__, reason="Test cannot be run with -OO")
def test_generate_parser(tmp_path, monkeypatch):
# Write Python code into the temporary directory, so that the
# generated tables will also go into the temporary directory.
# We use a unique suffix so that the test can be run multiple times
# without weirdness due to module caching.
suffix = secrets.token_hex(16)
lexer_file = tmp_path / f'test_parsing_lexer_{suffix}.py'
lexer_file.write_text(dedent(fr"""
from astropy.utils.parsing import lex
def make_lexer():
tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
return lex('test_parsing_lextab_{suffix}', 'test_parsing_lexer_{suffix}')
"""))
parser_file = tmp_path / f'test_parsing_parser_{suffix}.py'
parser_file.write_text(dedent(fr"""
from astropy.utils.parsing import yacc
def make_parser():
tokens = ('NUMBER', 'PLUS')
def p_expression_number(p):
'expression : NUMBER'
p[0] = p[1]
def p_expression_plus(p):
'expression : expression PLUS NUMBER'
p[0] = p[1] + p[3]
return yacc('test_parsing_parsetab_{suffix}', 'test_parsing_parser_{suffix}')
"""))
monkeypatch.syspath_prepend(tmp_path)
lexer_mod = importlib.import_module(f'test_parsing_lexer_{suffix}')
lexer = lexer_mod.make_lexer()
parser_mod = importlib.import_module(f'test_parsing_parser_{suffix}')
parser = parser_mod.make_parser()
result = parser.parse('1+2+3', lexer=lexer)
assert result == 6
lextab = (tmp_path / f'test_parsing_lextab_{suffix}.py').read_text()
assert lextab.startswith(_TAB_HEADER.format(package=f'test_parsing_lexer_{suffix}'))
parsetab = (tmp_path / f'test_parsing_parsetab_{suffix}.py').read_text()
assert parsetab.startswith(_TAB_HEADER.format(package=f'test_parsing_parser_{suffix}'))
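# For reference, a dependency-free sketch of the toy grammar exercised above
# (expression : NUMBER | expression PLUS NUMBER), i.e. left-folded addition;
# the helper only illustrates the expected semantics and is hypothetical.
def test_toy_grammar_semantics_sketch():
    def evaluate(expr):
        return sum(int(tok) for tok in expr.split('+'))
    assert evaluate('1+2+3') == 6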
|
6bdce693da7f2c154629460ea0d26a9a3515835a12e7274ba1b689cbcebe3cd6 | import abc
from collections import OrderedDict
import pytest
import numpy as np
from astropy.utils.metadata import MetaData, MergeConflictError, merge, enable_merge_strategies
from astropy.utils.metadata import common_dtype
from astropy.utils import metadata
from astropy.io import fits
class OrderedDictSubclass(OrderedDict):
pass
class MetaBaseTest:
__metaclass__ = abc.ABCMeta
def test_none(self):
d = self.test_class(*self.args)
assert isinstance(d.meta, OrderedDict)
assert len(d.meta) == 0
@pytest.mark.parametrize(('meta'), ([dict([('a', 1)]),
OrderedDict([('a', 1)]),
OrderedDictSubclass([('a', 1)])]))
def test_mapping_init(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta['a'] == 1
@pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_init(self, meta):
with pytest.raises(TypeError):
self.test_class(*self.args, meta=meta)
@pytest.mark.parametrize(('meta'), ([dict([('a', 1)]),
OrderedDict([('a', 1)]),
OrderedDictSubclass([('a', 1)])]))
def test_mapping_set(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta['a'] == 1
@pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_set(self, meta):
with pytest.raises(TypeError):
d = self.test_class(*self.args, meta=meta)
def test_meta_fits_header(self):
header = fits.header.Header()
header.set('observer', 'Edwin Hubble')
header.set('exptime', '3600')
d = self.test_class(*self.args, meta=header)
assert d.meta['OBSERVER'] == 'Edwin Hubble'
class ExampleData:
meta = MetaData()
def __init__(self, meta=None):
self.meta = meta
class TestMetaExampleData(MetaBaseTest):
test_class = ExampleData
args = ()
def test_metadata_merging_conflict_exception():
"""Regression test for issue #3294.
Ensure that an exception is raised when a metadata conflict exists
and ``metadata_conflicts='error'`` has been set.
"""
data1 = ExampleData()
data2 = ExampleData()
data1.meta['somekey'] = {'x': 1, 'y': 1}
data2.meta['somekey'] = {'x': 1, 'y': 999}
with pytest.raises(MergeConflictError):
merge(data1.meta, data2.meta, metadata_conflicts='error')
def test_metadata_merging():
# Recursive merge
meta1 = {'k1': {'k1': [1, 2],
'k2': 2},
'k2': 2,
'k4': (1, 2)}
meta2 = {'k1': {'k1': [3]},
'k3': 3,
'k4': (3,)}
out = merge(meta1, meta2, metadata_conflicts='error')
assert out == {'k1': {'k2': 2,
'k1': [1, 2, 3]},
'k2': 2,
'k3': 3,
'k4': (1, 2, 3)}
# Merge two ndarrays
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array([3])}
out = merge(meta1, meta2, metadata_conflicts='error')
assert np.all(out['k1'] == np.array([1, 2, 3]))
    # Merge list and np.ndarray
    meta1 = {'k1': [1, 2]}
    meta2 = {'k1': np.array([3])}
    out = merge(meta1, meta2, metadata_conflicts='error')
    assert np.all(out['k1'] == np.array([1, 2, 3]))
# Can't merge two scalar types
meta1 = {'k1': 1}
meta2 = {'k1': 2}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting shape
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array([[3]])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting array type
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array(['3'])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting array type with 'silent' merging
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array(['3'])}
out = merge(meta1, meta2, metadata_conflicts='silent')
assert np.all(out['k1'] == np.array(['3']))
def test_metadata_merging_new_strategy():
original_merge_strategies = list(metadata.MERGE_STRATEGIES)
class MergeNumbersAsList(metadata.MergeStrategy):
"""
Scalar float or int values are joined in a list.
"""
types = ((int, float), (int, float))
@classmethod
def merge(cls, left, right):
return [left, right]
class MergeConcatStrings(metadata.MergePlus):
"""
Scalar string values are concatenated
"""
types = (str, str)
enabled = False
# Normally can't merge two scalar types
meta1 = {'k1': 1, 'k2': 'a'}
meta2 = {'k1': 2, 'k2': 'b'}
# Enable new merge strategy
with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts='error')
assert out['k1'] == [1, 2]
assert out['k2'] == 'ab'
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
# Confirm the default enabled=False behavior
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Enable all MergeStrategy subclasses
with enable_merge_strategies(metadata.MergeStrategy):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts='error')
assert out['k1'] == [1, 2]
assert out['k2'] == 'ab'
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
metadata.MERGE_STRATEGIES = original_merge_strategies
def test_common_dtype_string():
u3 = np.array(['123'])
u4 = np.array(['1234'])
b3 = np.array([b'123'])
b5 = np.array([b'12345'])
assert common_dtype([u3, u4]).endswith('U4')
assert common_dtype([b5, u4]).endswith('U5')
assert common_dtype([b3, b5]).endswith('S5')
def test_common_dtype_basic():
i8 = np.array(1, dtype=np.int64)
f8 = np.array(1, dtype=np.float64)
u3 = np.array('123')
with pytest.raises(MergeConflictError):
common_dtype([i8, u3])
assert common_dtype([i8, i8]).endswith('i8')
assert common_dtype([i8, f8]).endswith('f8')
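# A rough NumPy-only sketch of the promotion behaviour verified above for
# numeric kinds (the string widening and str/bytes rules are astropy-specific
# and are not reproduced here).
def test_common_dtype_promote_types_sketch():
    assert np.promote_types('i8', 'i8') == np.dtype('i8')
    assert np.promote_types('i8', 'f8') == np.dtype('f8')
    assert np.promote_types('U3', 'U4') == np.dtype('U4')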
|
130771987f91bfd8f218d13615c5d55f3527d9d45b4001dbe0d1bc6dfaea1b73 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils.data_info import dtype_info_name
from astropy.table import QTable
from astropy.table.index import SlicedIndex
from astropy.time import Time
from astropy.coordinates import SkyCoord
import astropy.units as u
STRING_TYPE_NAMES = {(True, 'S'): 'bytes',
(True, 'U'): 'str'}
DTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(True, 'S')] + '4'),
(np.array('abcd').dtype, STRING_TYPE_NAMES[(True, 'U')] + '4'),
('S4', STRING_TYPE_NAMES[(True, 'S')] + '4'),
('U4', STRING_TYPE_NAMES[(True, 'U')] + '4'),
(np.void, 'void'),
(np.int32, 'int32'),
(bool, 'bool'),
(float, 'float64'),
('<f4', 'float32'),
('u8', 'uint64'),
('c16', 'complex128'),
('object', 'object'))
@pytest.mark.parametrize('input,output', DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
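# Quick cross-check sketch: the kind codes listed in the docstring above are
# exactly NumPy's dtype.kind values.
def test_dtype_kind_codes_sketch():
    assert np.dtype('f8').kind == 'f'
    assert np.dtype('U4').kind == 'U'
    assert np.dtype('S4').kind == 'S'
    assert np.dtype(object).kind == 'O'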
def test_info_no_copy_numpy():
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
col = [1, 2]
t = QTable([col], names=['col'])
t.add_index('col')
val = t['col'][0]
# Returns a numpy scalar (e.g. np.float64) with no .info
assert isinstance(val, np.number)
with pytest.raises(AttributeError):
val.info
val = t['col'][:]
assert val.info.indices == []
cols = [[1, 2] * u.m,
Time([1, 2], format='cxcsec')]
@pytest.mark.parametrize('col', cols)
def test_info_no_copy_mixin_with_index(col):
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
t = QTable([col], names=['col'])
t.add_index('col')
val = t['col'][0]
assert 'info' not in val.__dict__
assert val.info.indices == []
val = t['col'][:]
assert 'info' in val.__dict__
assert val.info.indices == []
val = t[:]['col']
assert 'info' in val.__dict__
assert isinstance(val.info.indices[0], SlicedIndex)
def test_info_no_copy_skycoord():
"""Test that getting a single item from Table SkyCoord column object does
not copy info. Cannot create an index on a SkyCoord currently.
"""
    col = SkyCoord([1, 2], [1, 2], unit='deg')
t = QTable([col], names=['col'])
val = t['col'][0]
assert 'info' not in val.__dict__
assert val.info.indices == []
val = t['col'][:]
assert val.info.indices == []
val = t[:]['col']
assert val.info.indices == []
|
2740a9359abd23c2ecbfeb631855043c80fc58b2567d30b0e2faba2f475c002b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import sys
import stat
import errno
import base64
import random
import shutil
import hashlib
import pathlib
import platform
import tempfile
import warnings
import itertools
import contextlib
import urllib.error
import urllib.parse
import urllib.request
from itertools import islice
from concurrent.futures import ThreadPoolExecutor
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
from astropy import units as _u # u is taken
from astropy.config import paths
import astropy.utils.data
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.data import (
CacheMissingWarning,
CacheDamaged,
conf,
_deltemps,
compute_hash,
download_file,
cache_contents,
_tempfilestodel,
get_cached_urls,
is_url_in_cache,
cache_total_size,
get_file_contents,
check_download_cache,
clear_download_cache,
get_pkg_data_fileobj,
get_readable_fileobj,
import_file_to_cache,
export_download_cache,
get_pkg_data_contents,
get_pkg_data_filename,
import_download_cache,
get_free_space_in_dir,
check_free_space_in_dir,
_get_download_cache_loc,
download_files_in_parallel,
is_url,
get_pkg_data_path
)
CI = os.environ.get('CI', False) == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "wt") as f:
f.write("some contents\n")
try:
with open(f1, "rt"):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmpdir):
def _valid_urls(tmpdir):
for i in itertools.count():
c = os.urandom(16).hex()
fn = os.path.join(tmpdir, "valid_" + str(i))
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmpdir)
@pytest.fixture
def invalid_urls(tmpdir):
def _invalid_urls(tmpdir):
for i in itertools.count():
fn = os.path.join(tmpdir, "invalid_" + str(i))
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmpdir)
@pytest.fixture
def temp_cache(tmpdir):
with paths.set_temp_cache(tmpdir):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmpdir, valid_urls):
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmpdir, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM,
"os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM,
"os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM,
"_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(astropy.utils.data,
"_SafeTemporaryDirectory",
no_TemporaryDirectory)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://"+"a"*256+".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmpdir):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmpdir):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmpdir):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel([u for (u, c, c_bad) in urls],
cache=True,
sources=sources)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True),
[u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u),
[u for (u, c) in urls]))
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(temp_cache, tmpdir):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=str(tmpdir), delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmpdir, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmpdir / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmpdir, temp_cache):
with TemporaryDirectory(dir=tmpdir) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {'cafile': None, 'capath': '/does/not/exist'}
msg = f'Verification of TLS/SSL certificate at {TESTURL_SSL} failed'
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(TESTURL_SSL, cache=False,
ssl_context=ssl_context, allow_insecure=True)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url+s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmpdir, valid_urls, method):
urls = []
# tmpdir is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmpdir):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = set(u for (u, c) in urls)
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
def test_download_parallel_partial_success_lock_safe(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmpdir):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for (fn, u, c) in td:
c_plus = c + " updated"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
("filename"), ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if ((not HAS_BZ2 and "bz2" in filename) or
(not HAS_LZMA and "xz" in filename)):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert " format files are not supported" in str(e.value)
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write(contents, mode="wb")
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(ModuleNotFoundError,
match=r'does not provide the [lb]z[2m]a? module\.'):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmpdir):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmpdir.join("tmp.dat").strpath
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
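# Sketch of the hashing convention asserted above (assumption: the hex digest
# of the file's raw bytes under MD5), reading in blocks so large files do not
# need to fit in memory; the helper name is hypothetical.
def test_compute_hash_chunked_sketch(tmpdir):
    def md5_hex(path, blocksize=65536):
        h = hashlib.md5()
        with open(path, "rb") as fh:
            for block in iter(lambda: fh.read(blocksize), b""):
                h.update(block)
        return h.hexdigest()
    filename = tmpdir.join("tmp2.dat").strpath
    with open(filename, "wb") as f:
        f.write(b"0123456789")
    assert md5_hex(filename) == compute_hash(filename)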
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname='astropy')
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
assert n_warns in (2, 4), f'Expected 2 or 4 warnings, got {n_warns}'
partial_warn_msgs = ['remote data cache could not be accessed', 'temporary file']
if n_warns == 4:
partial_warn_msgs.extend(['socket', 'socket'])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert len(partial_warn_msgs) == 0, f'Got some unexpected warnings: {partial_warn_msgs}'
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(CacheMissingWarning,
match=r".*Not clearing data cache - cache inaccessible.*"):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
    # now remove it so tests don't clutter up the temp dir; this should get
    # called at exit, anyway, but we do it here just to make sure it's
    # working correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because cache is unnecessary
@pytest.mark.parametrize(
("filename"),
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
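# Sanity sketch: the base64 blob above is an ordinary gzip stream, so the
# standard library alone can decode it (mirroring the transparent
# decompression get_readable_fileobj performs on the seekless FakeStream).
def test_compressed_stream_stdlib_sketch():
    import gzip
    gzipped_data = base64.b64decode(
        b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
        b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
    )
    text = gzip.GzipFile(fileobj=io.BytesIO(gzipped_data)).read()
    assert text.rstrip().endswith(b"CONTENT")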
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmpdir, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = os.path.join(tmpdir, "the.zip")
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmpdir, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmpdir, temp_cache, valid_urls):
zip_file_name = os.path.join(tmpdir, "the.zip")
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmpdir):
fn = tmpdir / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding='binary') == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding='binary') != c
def test_export_import_roundtrip_different_location(tmpdir, valid_urls):
original_cache = tmpdir / "original"
os.mkdir(original_cache)
zip_file_name = tmpdir / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = set(u for (u, c) in urls)
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmpdir / "new"
os.mkdir(new_cache)
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for (u, c) in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for (u, c, h) in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize('desired_size',
[1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmpdir, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(str(tmpdir), desired_size)
def test_get_free_space_file_directory(tmpdir):
fn = tmpdir / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(str(fn))
free_space = get_free_space_in_dir(str(tmpdir))
assert free_space > 0 and not hasattr(free_space, 'unit')
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(str(tmpdir), unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(str(tmpdir), unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmpdir):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmpdir))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmpdir):
fn = os.path.abspath(os.path.join(tmpdir, "file"))
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "wt") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "wt") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "wt") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
    f2 = download_file(u2, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == set([bf1, bf2, bf3, bf4])
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmpdir, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = str(tmpdir / "file")
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = str(tmpdir / "astropy")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download" / "url")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn, "r") as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmpdir, valid_urls):
u, c = next(valid_urls)
d1 = tmpdir / "1"
d2 = tmpdir / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
    # Assert that the temporary directory is empty after the
    # get_readable_fileobj() context manager has finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == (
"This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type('MockOpener', (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
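    # report_length and real_length are (re)bound below before each
    # download_file() call; MockURL reads them via closure, so the advertised
    # Content-Length and the number of bytes actually served can be
    # mismatched at will.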
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmpdir):
try:
with readonly_dir(tmpdir):
assert is_dir_readonly(tmpdir)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmpdir):
fn = tmpdir / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmpdir):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW+1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW+1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)],
pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so monkeypatch
        # _rmtree to simulate the Windows-style failure.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so monkeypatch
        # _rmtree to simulate the Windows-style failure.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp('allow_internet', False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), 'url'))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f)+'/')
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all" # noqa
download_file(url)
@pytest.mark.parametrize('base', ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file('file://', cache=True, sources=[u])
assert not is_url_in_cache('file:///')
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = ['Name or service not known',
'nodename nor servname provided, or not known',
'getaddrinfo failed',
'Temporary failure in name resolution',
'No address associated with hostname']
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
('s', 'ans'),
[('http://googlecom', True),
('https://google.com', True),
('ftp://google.com', True),
('sftp://google.com', True),
('ssh://google.com', True),
('file:///c:/path/to/the%20file.txt', True),
('google.com', False),
('C:\\\\path\\\\file.docx', False),
('data://file', False)])
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
e4f8ad19cc1d07889c34ff65b4d8f57e046e341b2228f90485774e282d491983 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from . import test_progress_bar_func
from astropy.utils import console
from astropy import units as u
class FakeTTY(io.StringIO):
"""IOStream that fakes a TTY; provide an encoding to emulate an output
stream with a specific encoding.
"""
def __new__(cls, encoding=None):
# Return a new subclass of FakeTTY with the requested encoding
if encoding is None:
return super().__new__(cls)
cls = type(encoding.title() + cls.__name__, (cls,),
{'encoding': encoding})
return cls.__new__(cls)
def __init__(self, encoding=None):
super().__init__()
def write(self, s):
if isinstance(s, bytes):
# Just allow this case to work
s = s.decode('latin-1')
elif self.encoding is not None:
s.encode(self.encoding)
return super().write(s)
def isatty(self):
return True
def test_fake_tty():
# First test without a specified encoding; we should be able to write
# arbitrary unicode strings
f1 = FakeTTY()
assert f1.isatty()
f1.write('☃')
assert f1.getvalue() == '☃'
# Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when
# trying to write a string containing non-ASCII characters
f2 = FakeTTY('ascii')
assert f2.isatty()
assert f2.__class__.__name__ == 'AsciiFakeTTY'
assert pytest.raises(UnicodeEncodeError, f2.write, '☃')
assert f2.getvalue() == ''
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_color_text():
assert console._color_text("foo", "green") == '\033[0;32mfoo\033[0m'
def test_color_print():
# This stuff is hard to test, at least smoke test it
console.color_print("foo", "green")
console.color_print("foo", "green", "bar", "red")
def test_color_print2():
# Test that this automatically detects that io.StringIO is
# not a tty
stream = io.StringIO()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == 'foo\n'
stream = io.StringIO()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == 'foobarbaz\n'
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_color_print3():
# Test that this thinks the FakeTTY is a tty and applies colors.
stream = FakeTTY()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\n'
stream = FakeTTY()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n'
def test_color_print_unicode():
console.color_print("überbær", "red")
def test_color_print_invalid_color():
console.color_print("foo", "unknown")
def test_spinner_non_unicode_console():
"""Regression test for #1760
    Ensures that the spinner can go into fallback mode when using the
unicode spinner on a terminal whose default encoding cannot encode the
unicode characters.
"""
stream = FakeTTY('ascii')
chars = console.Spinner._default_unicode_chars
with console.Spinner("Reticulating splines", file=stream,
chars=chars) as s:
next(s)
def test_progress_bar():
# This stuff is hard to test, at least smoke test it
with console.ProgressBar(50) as bar:
for i in range(50):
bar.update()
def test_progress_bar2():
for x in console.ProgressBar(range(50)):
pass
def test_progress_bar3():
def do_nothing(*args, **kwargs):
pass
console.ProgressBar.map(do_nothing, range(50))
def test_zero_progress_bar():
with console.ProgressBar(0) as bar:
pass
def test_progress_bar_as_generator():
    total = 0
    for x in console.ProgressBar(range(50)):
        total += x
    assert total == 1225
    total = 0
    for x in console.ProgressBar(50):
        total += x
    assert total == 1225
def test_progress_bar_map():
items = list(range(100))
result = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=True)
assert items == result
result1 = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=2)
assert items == result1
@pytest.mark.parametrize(("seconds", "string"),
[(864088, " 1w 3d"),
(187213, " 2d 4h"),
(3905, " 1h 5m"),
(64, " 1m 4s"),
(15, " 15s"),
(2, " 2s")]
)
def test_human_time(seconds, string):
human_time = console.human_time(seconds)
assert human_time == string
@pytest.mark.parametrize(("size", "string"),
[(8640882, "8.6M"),
(187213, "187k"),
(3905, "3.9k"),
(64, " 64 "),
(2, " 2 "),
(10*u.GB, " 10G")]
)
def test_human_file_size(size, string):
    human_size = console.human_file_size(size)
    assert human_size == string
@pytest.mark.parametrize("size", (50*u.km, 100*u.g))
def test_bad_human_file_size(size):
assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
|
5dde31cbf26bcbf8a6a5d22fea44c44b944859c1394e535b4da8d352b3ac130e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# namedtuple is needed for find_mod_objs so it can have a non-local module
from collections import namedtuple
from unittest import mock
import pytest
import yaml
from astropy.utils import introspection
from astropy.utils.introspection import (find_current_module, find_mod_objs,
minversion)
def test_pkg_finder():
"""
Tests that the `find_current_module` function works. Note that
this also implicitly tests compat.misc._patched_getmodule
"""
mod1 = 'astropy.utils.introspection'
mod2 = 'astropy.utils.tests.test_introspection'
mod3 = 'astropy.utils.tests.test_introspection'
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
def test_find_current_mod():
from sys import getrecursionlimit
thismodnm = __name__
assert find_current_module(0) is introspection
assert find_current_module(1).__name__ == thismodnm
assert find_current_module(getrecursionlimit() + 1) is None
assert find_current_module(0, True).__name__ == thismodnm
assert find_current_module(0, [introspection]).__name__ == thismodnm
assert find_current_module(0, ['astropy.utils.introspection']).__name__ == thismodnm
with pytest.raises(ImportError):
find_current_module(0, ['faddfdsasewrweriopunjlfiurrhujnkflgwhu'])
def test_find_mod_objs():
lnms, fqns, objs = find_mod_objs('astropy')
# this import is after the above call intentionally to make sure
# find_mod_objs properly imports astropy on its own
import astropy
# just check for astropy.test ... other things might be added, so we
# shouldn't check that it's the only thing
assert 'test' in lnms
assert astropy.test in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False)
assert 'namedtuple' in lnms
assert 'collections.namedtuple' in fqns
assert namedtuple in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True)
assert 'namedtuple' not in lnms
assert 'collections.namedtuple' not in fqns
assert namedtuple not in objs
def test_minversion():
import numpy
good_versions = ['1.16', '1.16.1', '1.16.0.dev', '1.16dev']
bad_versions = ['100000', '100000.2rc1']
for version in good_versions:
assert minversion(numpy, version)
assert minversion("numpy", version)
for version in bad_versions:
assert not minversion(numpy, version)
assert not minversion("numpy", version)
assert minversion(yaml, '3.1')
assert minversion('yaml', '3.1')
def test_find_current_module_bundle():
"""
Tests that the `find_current_module` function would work if used inside
an application bundle. Since we can't test this directly, we test what
would happen if inspect.getmodule returned `None`, which is what happens
inside PyInstaller and py2app bundles.
"""
with mock.patch('inspect.getmodule', return_value=None):
mod1 = 'astropy.utils.introspection'
mod2 = 'astropy.utils.tests.test_introspection'
mod3 = 'astropy.utils.tests.test_introspection'
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
|
568db9b97bb72b2ab069e7595998649c969d7f22cf436e798fb8601e91de8101 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.utils import collections
def test_homogeneous_list():
l = collections.HomogeneousList(int)
with pytest.raises(TypeError):
l.append(5.0)
def test_homogeneous_list2():
l = collections.HomogeneousList(int)
with pytest.raises(TypeError):
l.extend([5.0])
def test_homogeneous_list3():
l = collections.HomogeneousList(int)
l.append(5)
assert l == [5]
def test_homogeneous_list4():
l = collections.HomogeneousList(int)
l.extend([5])
assert l == [5]
def test_homogeneous_list5():
l = collections.HomogeneousList(int, [1, 2, 3])
with pytest.raises(TypeError):
l[1] = 5.0
def test_homogeneous_list_setitem_works():
l = collections.HomogeneousList(int, [1, 2, 3])
l[1] = 5
assert l == [1, 5, 3]
def test_homogeneous_list_setitem_works_with_slice():
l = collections.HomogeneousList(int, [1, 2, 3])
l[0:1] = [10, 20, 30]
assert l == [10, 20, 30, 2, 3]
l[:] = [5, 4, 3]
assert l == [5, 4, 3]
l[::2] = [2, 1]
assert l == [2, 4, 1]
def test_homogeneous_list_init_got_invalid_type():
with pytest.raises(TypeError):
collections.HomogeneousList(int, [1, 2., 3])
def test_homogeneous_list_works_with_generators():
hl = collections.HomogeneousList(int, (i for i in range(3)))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl.extend(i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl[0:1] = (i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl += (i for i in range(3))
assert hl == [0, 1, 2]
|
ca1c8fedd548f53bf208029e3629846278185aa635b3c3ac350697f613350a10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import concurrent.futures
import inspect
import pickle
import pytest
from astropy.utils.decorators import (deprecated_attribute, deprecated,
sharedmethod, classproperty, lazyproperty,
format_doc, deprecated_renamed_argument)
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning)
class NewDeprecationWarning(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
def test_deprecated_attribute():
class DummyClass:
def __init__(self):
self._foo = 42
self._bar = 4242
self._message = '42'
self._alternative = [42]
self._pending = {42}
def set_private(self):
self._foo = 100
self._bar = 1000
self._message = '100'
self._alternative = [100]
self._pending = {100}
foo = deprecated_attribute('foo', '0.2')
bar = deprecated_attribute('bar', '0.2',
warning_type=NewDeprecationWarning)
alternative = deprecated_attribute('alternative', '0.2',
alternative='other')
message = deprecated_attribute('message', '0.2', message='MSG')
pending = deprecated_attribute('pending', '0.2', pending=True)
dummy = DummyClass()
with pytest.warns(AstropyDeprecationWarning, match="The foo attribute is "
"deprecated and may be removed in a future version.") as w:
dummy.foo
assert len(w) == 1
with pytest.warns(NewDeprecationWarning, match="The bar attribute is "
"deprecated and may be removed in a future version.") as w:
dummy.bar
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match="MSG"):
dummy.message
with pytest.warns(AstropyDeprecationWarning, match=r"Use other instead\."):
dummy.alternative
with pytest.warns(AstropyPendingDeprecationWarning):
dummy.pending
dummy.set_private()
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated('100.0')
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated('100.0')
class TB(metaclass=TMeta):
pass
@deprecated('100.0', warning_type=NewDeprecationWarning)
class TC:
"""
This class has the custom warning.
"""
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
    # The only things that should be different about the new class are
    # __doc__, __init__, __bases__, __dict__, __subclasshook__, and
    # __init_subclass__.
for x in dir(orig_A):
if x not in ('__doc__', '__init__', '__bases__', '__dict__',
'__subclasshook__', '__init_subclass__'):
assert getattr(TA, x) == getattr(orig_A, x)
with pytest.warns(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert 'function' not in TA.__doc__
assert 'deprecated' in TA.__doc__
assert 'function' not in TA.__init__.__doc__
assert 'deprecated' in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
with pytest.warns(NewDeprecationWarning) as w:
TC()
assert len(w) == 1
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
This previously failed because the deprecated decorator would wrap objects
__init__ which takes no arguments.
"""
@deprecated('1.0')
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated('1.0')
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used ``super()`` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated('100.0')
class TB:
def __init__(self, a, b):
super().__init__()
with pytest.warns(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert 'function' not in TB.__doc__
assert 'deprecated' in TB.__doc__
assert 'function' not in TB.__init__.__doc__
assert 'deprecated' in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with pytest.warns(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated('1.0')
@staticmethod
def B():
pass
@deprecated('1.0')
@classmethod
def C(cls):
pass
with pytest.warns(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.B.__doc__
with pytest.warns(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test2(overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
warning_type=NewDeprecationWarning)
def test4(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, Test().test4, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert 'test_decorators.py' in str(w[0].filename)
if method.__name__ == 'test4':
assert issubclass(w[0].category, NewDeprecationWarning)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(1, clobber=2)
def test_deprecated_argument_custom_message():
@deprecated_renamed_argument('foo', 'bar', '4.0', message='Custom msg')
def test(bar=0):
pass
with pytest.warns(AstropyDeprecationWarning, match='Custom msg'):
test(foo=0)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
arg_in_kwargs=True)
def test(**kwargs):
return kwargs['overwrite']
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert 'test_decorators.py' in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(1, clobber=2)
def test_deprecated_argument_relaxed():
    # relax=True turns the TypeError raised when both the old and new
    # keyword are used into a warning.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
# Using both. Both keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
# One positional, one keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
def test_deprecated_argument_pending():
    # With pending=True the deprecation is not yet active, so no warning is
    # issued and using both the old and the new keyword does not raise.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
assert test(clobber=1) == 1
# Using both. Both keyword
assert test(clobber=2, overwrite=1) == 1
# One positional, one keyword
assert test(1, clobber=2) == 1
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=True)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 4
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=[True, True, False])
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.raises(TypeError), pytest.warns(AstropyUserWarning):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be inside the kwargs one needs to set the
# arg_in_kwargs parameter. Without it it raises a TypeError.
with pytest.raises(TypeError):
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(**kwargs):
return kwargs['overwrite']
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'args', '1.3')
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'kwargs', '1.3')
def test3(**kwargs):
return kwargs
def test_deprecated_argument_remove():
@deprecated_renamed_argument('x', None, '2.0', alternative='astropy.y')
def test(dummy=11, x=3):
return dummy, x
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead") as w:
assert test(x=1) == (11, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, dummy=10) == (10, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=r'Use astropy\.y instead'):
        assert test(121, 1) == (121, 1)
assert test() == (11, 3)
assert test(121) == (121, 3)
assert test(dummy=121) == (121, 3)
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__['foo'].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls): return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__['foo'].__doc__ == "The foo."
def test_classproperty_lazy_threadsafe(fast_thread_switching):
"""
Test that a class property with lazy=True is thread-safe.
"""
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
# This is testing for race conditions, so try many times in the
# hope that we'll get the timing right.
for p in range(10000):
class A:
@classproperty(lazy=True)
def foo(cls):
nonlocal calls
calls += 1
return object()
# Have all worker threads query in parallel
calls = 0
futures = [executor.submit(lambda: A.foo) for i in range(workers)]
# Check that only one call happened and they all received it
values = [future.result() for future in futures]
assert calls == 1
assert values[0] is not None
assert values == [values[0]] * workers
def test_lazyproperty_threadsafe(fast_thread_switching):
"""
Test thread safety of lazyproperty.
"""
# This test is generally similar to test_classproperty_lazy_threadsafe
# above. See there for comments.
class A:
def __init__(self):
self.calls = 0
@lazyproperty
def foo(self):
self.calls += 1
return object()
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
a = A()
futures = [executor.submit(lambda: a.foo) for i in range(workers)]
values = [future.result() for future in futures]
assert a.calls == 1
assert a.foo is not None
assert values == [a.foo] * workers
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ''
    # Raises a ValueError if the input is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = 'test'
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = 'yes {0} no {opt}'
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, '/', opt='= life')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'yes / no = life'
# Test that we can include the original docstring
docstring2 = 'yes {0} no {__doc__}'
@format_doc(docstring2, '/')
def testfunc3():
'''= 2 / 2 * life'''
pass
assert inspect.getdoc(testfunc3) == 'yes / no = 2 / 2 * life'
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
'''test'''
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
'''test {0} test {opt}'''
pass
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, '+', opt='= 2 * test')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'test + test = 2 * test'
# Test that we can include the original docstring
def docstring2():
'''test {0} test {__doc__}'''
pass
@format_doc(docstring2, '+')
def testfunc3():
'''= 4 / 2 * test'''
pass
assert inspect.getdoc(testfunc3) == 'test + test = 4 / 2 * test'
def test_format_doc_selfInput_simple():
# Simple tests with self input
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
'''not test'''
pass
assert inspect.getdoc(testfunc_1) == 'not test'
def test_format_doc_selfInput_format():
    # Tests with self input (``None``, the special case) and formatting
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
'''dum {0} dum {opt}'''
pass
# Test that the formatting is done right
@format_doc(None, 'di', opt='da dum')
def testfunc1():
'''dum {0} dum {opt}'''
pass
assert inspect.getdoc(testfunc1) == 'dum di dum da dum'
# Test that we cannot recursively insert the original documentation
@format_doc(None, 'di')
def testfunc2():
'''dum {0} dum {__doc__}'''
pass
assert inspect.getdoc(testfunc2) == 'dum di dum '
def test_format_doc_onMethod():
# Check if the decorator works on methods too, to spice it up we try double
# decorator
docstring = 'what we do {__doc__}'
class TestClass:
@format_doc(docstring)
@format_doc(None, 'strange.')
def test_method(self):
'''is {0}'''
pass
assert inspect.getdoc(TestClass.test_method) == 'what we do is strange.'
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = 'what we do {__doc__} {0}{opt}'
@format_doc(docstring, 'strange', opt='.')
class TestClass:
'''is'''
pass
assert inspect.getdoc(TestClass) == 'what we do is strange.'
|
2679be7acfd93ae01b6b79d250ee068fd0d08ebc7da63f8460c9f612af3da654 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import traceback
import pytest
from astropy.utils.codegen import make_function_with_signature
def test_make_function_with_signature_lineno():
"""
    Tests that a function made with ``make_function_with_signature`` is given
    the correct line number in the module it was created from (i.e. the line
    ``make_function_with_signature`` was called from).
"""
def crashy_function(*args, **kwargs):
1 / 0
# Make a wrapper around this function with the signature:
# crashy_function(a, b)
# Note: the signature is not really relevant to this test
wrapped = make_function_with_signature(crashy_function, ('a', 'b'))
line = """
wrapped = make_function_with_signature(crashy_function, ('a', 'b'))
""".strip()
try:
wrapped(1, 2)
except Exception:
exc_cls, exc, tb = sys.exc_info()
assert exc_cls is ZeroDivisionError
# The *last* line in the traceback should be the 1 / 0 line in
# crashy_function; the next line up should be the line that the
        # make_function_with_signature call was on
tb_lines = traceback.format_tb(tb)
        assert '1 / 0' in tb_lines[-1]
        assert line in tb_lines[-2]
else:
pytest.fail('This should have caused an exception')
|
49d3d96927ec9765caa5979ab84be00c2c739f39d313ccacb941453a6794e154 | import time
import numpy as np
from astropy.utils.misc import NumpyRNGContext
def func(i):
"""An identity function that jitters its execution time by a
pseudo-random amount.
FIXME: This function should be defined in test_console.py, but Astropy's
test runner interacts strangely with Python's `multiprocessing`
module. I was getting a mysterious PicklingError until I moved this
function into a separate module. (It worked fine in a standalone pytest
script.)"""
with NumpyRNGContext(i):
time.sleep(np.random.uniform(0, 0.01))
return i
|
cdd89a126890daf75868162943e11ee325bd69cd3cd25b3906c4288ceca0f467 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.state import ScienceState
def test_ScienceState_and_Context():
"""
Tests a ScienceState and spawned contexts.
"""
class MyState(ScienceState):
_value = "A"
_state = dict(foo="bar")
state = {"foo": "bar"}
# test created ScienceState
assert MyState.get() == "A"
assert MyState.validate("B") == "B"
assert MyState._state == state
# test setting
with MyState.set("B"):
assert MyState.get() == "B"
assert MyState.get() == "A" # test returning
|
657aa82dd82768dc61b0076f88168026aa285cf340c49eec13d12fb8bd30f9da | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.shapes import NDArrayShapeMethods
from astropy.utils.data_info import ParentDtypeInfo
from .function_helpers import (MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ['Masked', 'MaskedNDArray']
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
        The data for which a mask is to be added. The result will be
        a subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
            If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type('Masked' + data_cls.__name__,
(data_cls, base_cls), {}, data_cls=data_cls)
return masked_cls
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, 'mask', None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError('cannot handle np.ma.masked here.') from None
else:
data = data.data
return data, mask
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks))
def _get_mask(self):
"""The mask.
        When set, the original mask is replaced with whatever it is set with,
using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, 'dtype', None)
mask_dtype = (np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names else np.dtype('?'))
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
            # Even if not copying, use a view so that shape setting
            # does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
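    # A quick illustration (assuming ``numpy as np`` and the ``Masked``
    # factory are imported):
    #
    #     >>> Masked(np.array([1., 2., 3.]), mask=[False, True, False]).filled(0.)
    #     array([1., 0., 3.])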
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if 'info' in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
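    # Item assignment follows the same convention (a sketch; ``np.ma.masked``
    # masks an element, while a plain value unmasks it):
    #
    #     >>> ma = Masked(np.arange(3.))
    #     >>> ma[1] = np.ma.masked   # masks the element; data left untouched
    #     >>> ma[2] = 10.            # sets the data and clears the mask
    #     >>> ma.mask.tolist()
    #     [False, True, False]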
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
'parquet': 'data_mask',
None: 'null_value'}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
out['data'] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = masked_array.mask
elif method == 'null_value':
out['data'] = np.ma.MaskedArray(masked_array.unmasked,
mask=masked_array.mask)
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault('mask', getattr(map['data'], 'mask', False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault('__class__',
data_cls.__module__ + '.' + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
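# For example (illustrative; ``Masked`` is the factory defined above),
# equality on masked *string* arrays keeps the mask, which a plain
# ndarray comparison would have dropped:
#
#     >>> ma = Masked(np.array(['a', 'b']), mask=[False, True])
#     >>> (ma == 'a').mask
#     array([False,  True])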
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
    use the ``flat`` attribute of the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if '__new__' not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if 'info' not in cls.__dict__ and hasattr(cls._data_cls, 'info'):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {'serialize_method'}
new_info = type(cls.__name__+'Info',
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names))
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (isinstance(dtype, builtins.type)
and issubclass(dtype, np.ndarray)):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (dtype.itemsize == self.dtype.itemsize
and (dtype.names is None
or len(dtype.names) == len(self.dtype.names))):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
f"with a different number of fields or size.")
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, '_mask', False))
if 'info' in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
            # __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if 'could not broadcast' in exc.args[0]:
raise AttributeError(
'Incompatible shape for in-place modification. '
'Use `.reshape()` to make a copy with the desired '
'shape.') from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method('__eq__')
_ne_simple = _comparison_method('__ne__')
__lt__ = _comparison_method('__lt__')
__le__ = _comparison_method('__le__')
__gt__ = _comparison_method('__gt__')
__ge__ = _comparison_method('__ge__')
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] == other[field]
for field in self.dtype.names], axis=-1)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] != other[field]
for field in self.dtype.names], axis=-1)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None):
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy()
else:
np.copyto(out, masks[0])
return out
out = np.logical_or(masks[0], masks[1], out=out)
for mask in masks[2:]:
np.logical_or(out, mask, out=out)
return out
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop('out', None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError('cannot write to unmasked output')
elif out_mask is None:
out_mask = m
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if 'axes' in kwargs:
raise NotImplementedError("Masked does not yet support gufunc "
"calls with 'axes'.")
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(
np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1 else
np.logical_or.reduce(mask1))
mask = self._combine_masks(masks, out=out_mask)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(' ', ''))
axis = kwargs.get('axis', -1)
keepdims = kwargs.get('keepdims', False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == '__call__':
# Regular ufunc call.
mask = self._combine_masks(masks, out=out_mask)
elif method == 'outer':
# Must have two arguments; adjust masks as will be done for data.
assert len(masks) == 2
masks = [(m if m is not None else False) for m in masks]
mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)
elif method in {'reduce', 'accumulate'}:
# Reductions like np.add.reduce (sum).
if masks[0] is not None:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
# TODO: take care of 'out' too?
if method == 'reduce':
axis = kwargs.get('axis', None)
keepdims = kwargs.get('keepdims', False)
where = kwargs.get('where', True)
mask = np.logical_or.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims,
out=out_mask)
if where is not True:
# Mask also whole rows that were not selected by where,
# so would have been left as unmasked above.
mask |= np.logical_and.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims)
else:
# Accumulate
axis = kwargs.get('axis', 0)
mask = np.logical_or.accumulate(masks[0], axis=axis,
out=out_mask)
elif out is not None:
mask = False
else: # pragma: no cover
# Can only get here if neither input nor output was masked, but
# perhaps axis or where was masked (in numpy < 1.21 this is
# possible). We don't support this.
return NotImplemented
elif method in {'reduceat', 'at'}: # pragma: no cover
            # TODO: implement things like np.add.reduceat (used for median).
raise NotImplementedError("masked instances cannot yet deal with "
"'reduceat' or 'at'.")
if out_unmasked is not None:
kwargs['out'] = out_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
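    # The net effect for a regular ufunc call is plain mask propagation,
    # while reductions propagate any masked element (a sketch):
    #
    #     >>> ma = Masked(np.arange(3.), mask=[False, True, False])
    #     >>> np.add(ma, 1).mask
    #     array([False,  True, False])
    #     >>> np.add.reduce(ma)     # one input element is masked -> masked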
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked)
for t in types):
raise TypeError("the MaskedNDArray implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out))
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if 'where' not in kwargs:
kwargs['where'] = ~self.mask
if initial_func is not None and 'initial' not in kwargs:
kwargs['initial'] = initial_func(self.unmasked)
return kwargs
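    # So, e.g. (illustrative):
    #
    #     >>> ma = Masked(np.array([3., 1., 2.]), mask=[False, True, False])
    #     >>> ma.min()              # -> 2.0; the masked 1.0 is skipped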
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmax))
def max(self, axis=None, out=None, **kwargs):
return super().max(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmin))
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError('cannot yet give output')
return self._apply('compress', condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply('repeat', repeats, axis=axis)
def choose(self, choices, out=None, mode='raise'):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
def argmin(self, axis=None, out=None):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError('Cannot specify order when the array has no fields.')
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
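    # For example (illustrative):
    #
    #     >>> ma = Masked(np.array([2., 1., 3.]), mask=[False, False, True])
    #     >>> ma.argsort()          # the masked 3.0 sorts to the end
    #     array([1, 0, 2])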
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
        return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype('f4')
is_float16_result = out is None
result = self.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=~self.mask)
n = np.add.reduce(~self.mask, axis=axis, keepdims=keepdims)
result /= n
if is_float16_result:
result = result.astype(self.dtype)
return result
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(~self.mask, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type,
(np.integer, np.bool_)):
dtype = np.dtype('f8')
mean = self.mean(axis=axis, dtype=dtype, keepdims=True)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=~x.mask)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= (n == 0)
return result
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
result = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False):
return np.logical_or.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask)
def all(self, axis=None, out=None, keepdims=False):
return np.logical_and.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
    # obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError('can only get existing field from '
'structured dtype.')
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError('can only set existing field from '
'structured dtype.')
|
1afc7f56ae4363fe4bc06ba800e7abbe1c6ffeab8b85470a900ba5a9f1e30c95 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
"""
from .core import * # noqa
|
e451a8659d394ba4702091ac5b23077dbc8df6108b27c32bfd46748bf0fa6259 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import (
FunctionAssigner)
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = ['MASKED_SAFE_FUNCTIONS', 'APPLY_TO_BOTH_FUNCTIONS',
'DISPATCHED_FUNCTIONS', 'UNSUPPORTED_FUNCTIONS']
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
    Arguments to pass on to the numpy function for the masks.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
    When an argument is masked when it should not be or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others it may have
been lack of time. Issues or PRs for support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= set(
getattr(np, name) for name in np.core.fromnumeric.__all__
if name not in ({'choose', 'put', 'resize', 'searchsorted', 'where', 'alen'}))
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal,
np.real, np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
}
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
# TODO: some of the following could in principle be supported.
IGNORED_FUNCTIONS |= {
np.pad,
np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
}
# Really should do these...
IGNORED_FUNCTIONS |= set(getattr(np, setopsname)
for setopsname in np.lib.arraysetops.__all__)
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index, np.ravel_multi_index, np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(m if m is not None else np.zeros(np.shape(d), bool)
for d, m in zip(data, masks))
return data, masks
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs),
arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy,
nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(helps={
np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll})
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order='K', subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(prototype.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
if dtype is not None:
dtype = (np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names else np.dtype('?'))
mask = np.empty_like(prototype.mask, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
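# For example (illustrative; ``Masked`` as in `astropy.utils.masked`),
# filling with `numpy.ma.masked` only sets the mask:
#
#     >>> ma = Masked(np.arange(3.))
#     >>> np.full_like(ma, np.ma.masked).mask
#     array([ True,  True,  True])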
@dispatched_function
def put(a, ind, v, mode='raise'):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
    Like `numpy.place`, but for masked array ``arr`` and possibly masked
    ``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting='same_kind', where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype='u1')
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int),
minlength=minlength).astype(bool)
    result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
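# E.g. (a sketch): the masked entry is simply dropped from the counts,
#
#     >>> x = Masked(np.array([0, 1, 1, 2]), mask=[False, False, True, False])
#     >>> np.bincount(x)
#     array([1, 1, 1])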
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
@apply_to_both
def concatenate(arrays, axis=0, out=None):
data, masks = _get_data_and_masks(*arrays)
return (data,), (masks,), dict(axis=axis), out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [(arg.unmasked if is_masked else arg)
for arg, is_masked in zip(args, are_masked)]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [(np.broadcast_to(arg.mask, shape, subok=subok)
if is_masked else None)
for arg, is_masked in zip(args, are_masked)]
results = [(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis),
(arr_mask, obj, val_mask, axis), {}, None)
if NUMPY_LT_1_19:
@dispatched_function
def count_nonzero(a, axis=None):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis)
else:
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
if NUMPY_LT_1_19:
def _zeros_like(a, dtype=None, order='K', subok=True, shape=None):
if shape != ():
return np.zeros_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
else:
return np.zeros_like(a, dtype=dtype, order=order, subok=subok,
shape=(1,))[0]
else:
_zeros_like = np.zeros_like
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(
np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out,
overwrite_input=overwrite_input)
return (r.reshape(k) if keepdims else r) if out is None else out
def _masked_quantile_1d(a, q, **kwargs):
"""
Private function for rank 1 arrays. Compute quantile ignoring NaNs.
See nanpercentile for parameter usage
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
    # As for np.nanquantile, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
keepdims = kwargs.pop('keepdims', False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs)
return (r.reshape(k) if keepdims else r) if out is None else out
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = (a1d == a2d)
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
@dispatched_function
def choose(a, choices, out=None, mode='raise'):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == 'raise':
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {'mode': mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs['out'] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs['out'] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
    Like `numpy.select`, but masks in ``choicelist`` are propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c
for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return ((condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask), {}, None)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray))
and x.ndim != 0): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
# Avoid making arrays 1-D; will just raise below.
m = xpm | fpm
            xp = xp[~m]
            fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
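# For example (illustrative): the masked point in ``xp`` does not contribute,
#
#     >>> xp = Masked(np.array([0., 1., 2.]), mask=[False, True, False])
#     >>> np.interp(0.5, xp, np.array([0., 100., 2.]))
#     0.5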
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void: we need to make all the
# format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
ffs = getattr(format_function, 'format_functions', None)
if ffs:
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=' ', prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
    # - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _leading_trailing, _formatArray
data = np.asarray(a)
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
@dispatched_function
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=""):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _make_options_dict, _format_options
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode)
options = _format_options.copy()
options.update(overrides)
options['linewidth'] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {'nansum': 0, 'nancumsum': 0,
'nanprod': 1, 'nancumprod': 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked.")
elif "arg" in nanfuncname:
doc += ("No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0.")
else:
doc += ("No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output.")
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(masked_nanfunc(nanfuncname),
helps=getattr(np, nanfuncname))
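# Illustrative sketch of the resulting behaviour (hedged example):
#
#     >>> from astropy.utils.masked import Masked
#     >>> a = Masked(np.array([1., np.nan, 3.]), mask=[False, False, True])
#     >>> np.nansum(a)   # NaN and masked entries are both filled with 0
#     1.0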
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The logic is that for
# those presumably the use of the mask is not entirely obvious.
__all__ += sorted(helper.__name__ for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values())
| set(DISPATCHED_FUNCTIONS.values())) if helper.__doc__)
|
876df8bbee2363eff5db82838215db7f20cc37b0cb82cf8add3af19ed71f152a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
import warnings
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['asdf', 'bleach', 'bottleneck', 'bs4', 'bz2', 'h5py',
'html5lib', 'IPython', 'jplephem', 'lxml', 'matplotlib',
'mpmath', 'pandas', 'PIL', 'pytz', 'scipy', 'skyfield',
'sortedcontainers', 'lzma', 'pyarrow']
_formerly_optional_deps = ['yaml'] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps['PLT'] = 'matplotlib.pyplot'
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
if module_name == "YAML":
warnings.warn(
"PyYaml is now a strict dependency. HAS_YAML is deprecated as "
"of v5.0 and will be removed in a subsequent version.",
category=AstropyDeprecationWarning)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
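# Illustrative usage (a sketch; the attribute is resolved lazily on first
# access through the module-level __getattr__ above):
#
#     from astropy.utils.compat.optional_deps import HAS_SCIPY
#     if HAS_SCIPY:
#         from scipy import optimize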
|
2092252ef5001400a2fc6fb990b97bd25ccba401cb8fc853d730780d3c81cf05 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains utility modules for compatibility with older/newer
versions of python, as well as including some bugfixes for the stdlib that are
important for Astropy.
Note that all public functions in the `astropy.utils.compat.misc` module are
imported here for easier access.
The content of this module is solely for internal use of ``astropy``
and subject to changes without deprecations. Do not use it in external
packages or code.
"""
from .misc import * # noqa
# Importing this module will also install monkey-patches defined in it
from .numpycompat import * # noqa
|
b68f2dfb868108d624ce0d3e415b72cf1e7e25ce6d49f66fc7e6b9210daee145 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = ['NUMPY_LT_1_19', 'NUMPY_LT_1_20', 'NUMPY_LT_1_21_1',
'NUMPY_LT_1_22', 'NUMPY_LT_1_22_1', 'NUMPY_LT_1_23']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_19 = not minversion(np, '1.19')
NUMPY_LT_1_20 = not minversion(np, '1.20')
NUMPY_LT_1_21_1 = not minversion(np, '1.21.1')
NUMPY_LT_1_22 = not minversion(np, '1.22')
NUMPY_LT_1_22_1 = not minversion(np, '1.22.1')
NUMPY_LT_1_23 = not minversion(np, '1.23dev0')
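# Illustrative usage (a sketch): guard version-dependent code paths, e.g.
#
#     from astropy.utils.compat import NUMPY_LT_1_22
#     if NUMPY_LT_1_22:
#         ...  # fall back to the pre-1.22 behaviour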
|
466589b41aae7bc6b94550bbf9b075b7e8965d8b4aecf92133bb78a9473610b7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""
import sys
import functools
from contextlib import suppress
__all__ = ['override__dir__', 'suppress',
'possible_filename', 'namedtuple_asdict']
def possible_filename(filename):
"""
Determine if the ``filename`` argument is an allowable type for a filename.
In Python 3.3 use of non-unicode filenames on system calls such as
`os.stat` and others that accept a filename argument was deprecated (and
may be removed outright in the future).
Therefore this returns `True` in all cases except for `bytes` strings in
Windows.
"""
if isinstance(filename, str):
return True
elif isinstance(filename, bytes):
return not (sys.platform == 'win32')
return False
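# Illustrative behaviour (a sketch):
#
#     possible_filename('data.txt')    # True on all platforms
#     possible_filename(b'data.txt')   # False on Windows, True elsewhere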
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to
include the "standard" members on the object as well. This
decorator takes care of that automatically, and all the wrapped
function needs to do is return a list of the "special" members
that wouldn't be found by the normal Python means.
Example
-------
Your class could define __dir__ as follows::
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
"""
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
def namedtuple_asdict(namedtuple):
"""
    The same as ``namedtuple._asdict()``.
Parameters
----------
namedtuple : collections.namedtuple
The named tuple to get the dict of
"""
return namedtuple._asdict()
|
347ea97b9e61f3d3d3eb90e15b845ef5978997329c1f7e48687d2150b1a1f8e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import urllib.request
import os
import locale
import platform
import pytest
import numpy as np
from numpy.testing import assert_array_equal
import erfa
from astropy.time import Time, TimeDelta
from astropy.utils.iers import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.tests.test_imports import test_imports
# Import every top-level astropy module as a test that the ERFA leap second
# table is not updated for normal imports.
test_imports()
# Now test that the erfa leap_seconds table has not been updated. This must be
# done at the module level, which unfortunately will abort the entire test run
# if it fails. Running within a normal pytest test will not work because the
# other tests will end up updating this attribute by virtue of doing Time UTC
# transformations.
assert erfa.leap_seconds._expires is None
# Tests in this module assume that the erfa.leap_seconds attribute has been
# updated from the `erfa` package built-in table to the astropy built-in
# leap-second table. That has the effect of ensuring that the
# `erfa.leap_seconds.expires` property is sufficiently in the future.
iers_table = iers.LeapSeconds.auto_open()
erfa.leap_seconds.update(iers_table)
assert erfa.leap_seconds._expires is not None
SYSTEM_FILE = '/usr/share/zoneinfo/leap-seconds.list'
# Test leap_seconds.list in test/data.
LEAP_SECOND_LIST = get_pkg_data_filename('data/leap-seconds.list')
def test_configuration():
# This test just ensures things stay consistent.
# Adjust if changes are made.
assert iers.conf.iers_leap_second_auto_url == iers.IERS_LEAP_SECOND_URL
assert iers.conf.ietf_leap_second_auto_url == iers.IETF_LEAP_SECOND_URL
class TestReading:
"""Basic tests that leap seconds can be read."""
def verify_day_month_year(self, ls):
assert np.all(ls['day'] == 1)
assert np.all((ls['month'] == 1) | (ls['month'] == 7) |
(ls['year'] < 1970))
assert np.all(ls['year'] >= 1960)
t = Time({'year': ls['year'], 'month': ls['month'], 'day': ls['day']},
format='ymdhms')
assert np.all(t == Time(ls['mjd'], format='mjd'))
def test_read_leap_second_dat(self):
ls = iers.LeapSeconds.from_iers_leap_seconds(
iers.IERS_LEAP_SECOND_FILE)
        # Below, >= to take into account we might ship an updated file.
assert ls.expires >= Time('2020-06-28', scale='tai')
assert ls['mjd'][0] == 41317
assert ls['tai_utc'][0] == 10
assert ls['mjd'][-1] >= 57754
assert ls['tai_utc'][-1] >= 37
self.verify_day_month_year(ls)
def test_read_leap_second_dat_locale(self):
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'fr_FR')
else:
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
ls = iers.LeapSeconds.from_iers_leap_seconds(
iers.IERS_LEAP_SECOND_FILE)
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
        # Below, >= to take into account we might ship an updated file.
assert ls.expires >= Time('2020-06-28', scale='tai')
def test_open_leap_second_dat(self):
ls = iers.LeapSeconds.from_iers_leap_seconds(
iers.IERS_LEAP_SECOND_FILE)
ls2 = iers.LeapSeconds.open(iers.IERS_LEAP_SECOND_FILE)
assert np.all(ls == ls2)
@pytest.mark.parametrize('file', (
LEAP_SECOND_LIST,
"file:" + urllib.request.pathname2url(LEAP_SECOND_LIST)))
def test_read_leap_seconds_list(self, file):
ls = iers.LeapSeconds.from_leap_seconds_list(file)
assert ls.expires == Time('2020-06-28', scale='tai')
assert ls['mjd'][0] == 41317
assert ls['tai_utc'][0] == 10
assert ls['mjd'][-1] == 57754
assert ls['tai_utc'][-1] == 37
self.verify_day_month_year(ls)
@pytest.mark.parametrize('file', (
LEAP_SECOND_LIST,
"file:" + urllib.request.pathname2url(LEAP_SECOND_LIST)))
def test_open_leap_seconds_list(self, file):
ls = iers.LeapSeconds.from_leap_seconds_list(file)
ls2 = iers.LeapSeconds.open(file)
assert np.all(ls == ls2)
@pytest.mark.skipif(not os.path.isfile(SYSTEM_FILE),
reason=f'system does not have {SYSTEM_FILE}')
def test_open_system_file(self):
ls = iers.LeapSeconds.open(SYSTEM_FILE)
expired = ls.expires < Time.now()
if expired:
pytest.skip("System leap second file is expired.")
assert not expired
def make_fake_file(expiration, tmpdir):
    """Copy the built-in IERS file but set a different expiration date."""
ls = iers.LeapSeconds.from_iers_leap_seconds()
fake_file = str(tmpdir.join('fake_leap_seconds.dat'))
with open(fake_file, 'w') as fh:
fh.write('\n'.join([f'# File expires on {expiration}']
+ str(ls).split('\n')[2:-1]))
return fake_file
def test_fake_file(tmpdir):
fake_file = make_fake_file('28 June 2345', tmpdir)
fake = iers.LeapSeconds.from_iers_leap_seconds(fake_file)
assert fake.expires == Time('2345-06-28', scale='tai')
class TestAutoOpenExplicitLists:
# For this set of tests, leap-seconds are allowed to be expired
# except as explicitly tested.
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_auto_open_simple(self):
ls = iers.LeapSeconds.auto_open([iers.IERS_LEAP_SECOND_FILE])
assert ls.meta['data_url'] == iers.IERS_LEAP_SECOND_FILE
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_auto_open_erfa(self):
ls = iers.LeapSeconds.auto_open(['erfa', iers.IERS_LEAP_SECOND_FILE])
assert ls.meta['data_url'] in ['erfa', iers.IERS_LEAP_SECOND_FILE]
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_fake_future_file(self, tmpdir):
fake_file = make_fake_file('28 June 2345', tmpdir)
# Try as system file for auto_open, setting auto_max_age such
# that any ERFA or system files are guaranteed to be expired,
# while the fake file is guaranteed to be OK.
with iers.conf.set_temp('auto_max_age', -100000):
ls = iers.LeapSeconds.auto_open([
'erfa', iers.IERS_LEAP_SECOND_FILE, fake_file])
assert ls.expires == Time('2345-06-28', scale='tai')
assert ls.meta['data_url'] == str(fake_file)
# And as URL
fake_url = "file:" + urllib.request.pathname2url(fake_file)
ls2 = iers.LeapSeconds.auto_open([
'erfa', iers.IERS_LEAP_SECOND_FILE, fake_url])
assert ls2.expires == Time('2345-06-28', scale='tai')
assert ls2.meta['data_url'] == str(fake_url)
def test_fake_expired_file(self, tmpdir):
fake_file1 = make_fake_file('28 June 2010', tmpdir)
fake_file2 = make_fake_file('27 June 2012', tmpdir)
# Between these and the built-in one, the built-in file is best.
ls = iers.LeapSeconds.auto_open([fake_file1, fake_file2,
iers.IERS_LEAP_SECOND_FILE])
assert ls.meta['data_url'] == iers.IERS_LEAP_SECOND_FILE
# But if we remove the built-in one, the least expired one will be
# used and we get a warning that it is stale.
with pytest.warns(iers.IERSStaleWarning):
ls2 = iers.LeapSeconds.auto_open([fake_file1, fake_file2])
assert ls2.meta['data_url'] == fake_file2
assert ls2.expires == Time('2012-06-27', scale='tai')
# Use the fake files to make sure auto_max_age is safe.
# Should have no warning in either example.
with iers.conf.set_temp('auto_max_age', None):
ls3 = iers.LeapSeconds.auto_open([fake_file1,
iers.IERS_LEAP_SECOND_FILE])
assert ls3.meta['data_url'] == iers.IERS_LEAP_SECOND_FILE
with iers.conf.set_temp('auto_max_age', None):
ls4 = iers.LeapSeconds.auto_open([fake_file1, fake_file2])
assert ls4.meta['data_url'] == fake_file2
@pytest.mark.remote_data
class TestRemoteURLs:
def setup_class(cls):
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail.
iers.conf.auto_download = True
def teardown_class(cls):
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
# In these tests, the results may be cached.
# This is fine - no need to download again.
def test_iers_url(self):
ls = iers.LeapSeconds.auto_open([iers.IERS_LEAP_SECOND_URL])
assert ls.expires > Time.now()
def test_ietf_url(self):
ls = iers.LeapSeconds.auto_open([iers.IETF_LEAP_SECOND_URL])
assert ls.expires > Time.now()
class TestDefaultAutoOpen:
"""Test auto_open with different _auto_open_files."""
def setup(self):
# Identical to what is used in LeapSeconds.auto_open().
self.good_enough = (iers.LeapSeconds._today()
+ TimeDelta(180 - iers._none_to_float(iers.conf.auto_max_age),
format='jd'))
self._auto_open_files = iers.LeapSeconds._auto_open_files.copy()
def teardown(self):
iers.LeapSeconds._auto_open_files = self._auto_open_files
def remove_auto_open_files(self, *files):
"""Remove some files from the auto-opener.
The default set is restored in teardown.
"""
for f in files:
iers.LeapSeconds._auto_open_files.remove(f)
def test_erfa_found(self):
# Set huge maximum age such that whatever ERFA has is OK.
# Since it is checked first, it should thus be found.
with iers.conf.set_temp('auto_max_age', 100000):
ls = iers.LeapSeconds.open()
assert ls.meta['data_url'] == 'erfa'
def test_builtin_found(self):
# Set huge maximum age such that built-in file is always OK.
# If we remove 'erfa', it should thus be found.
self.remove_auto_open_files('erfa')
with iers.conf.set_temp('auto_max_age', 100000):
ls = iers.LeapSeconds.open()
assert ls.meta['data_url'] == iers.IERS_LEAP_SECOND_FILE
# The test below is marked remote_data only to ensure it runs
# as an allowed-fail job on CI: i.e., we will notice it (eventually)
# but will not be misled in thinking that a PR is bad.
@pytest.mark.remote_data
def test_builtin_not_expired(self):
# TODO: would be nice to have automatic PRs for this!
ls = iers.LeapSeconds.open(iers.IERS_LEAP_SECOND_FILE)
assert ls.expires > self.good_enough, (
"The leap second file built in to astropy is expired. Fix with:\n"
"cd astropy/utils/iers/data/; . update_builtin_iers.sh\n"
"and commit as a PR (for details, see release procedure).")
def test_fake_future_file(self, tmpdir):
fake_file = make_fake_file('28 June 2345', tmpdir)
# Try as system file for auto_open, setting auto_max_age such
# that any ERFA or system files are guaranteed to be expired.
with iers.conf.set_temp('auto_max_age', -100000), \
iers.conf.set_temp('system_leap_second_file', fake_file):
ls = iers.LeapSeconds.open()
assert ls.expires == Time('2345-06-28', scale='tai')
assert ls.meta['data_url'] == str(fake_file)
# And as URL
fake_url = "file:" + urllib.request.pathname2url(fake_file)
with iers.conf.set_temp('auto_max_age', -100000), \
iers.conf.set_temp('iers_leap_second_auto_url', fake_url):
ls2 = iers.LeapSeconds.open()
assert ls2.expires == Time('2345-06-28', scale='tai')
assert ls2.meta['data_url'] == str(fake_url)
def test_fake_expired_file(self, tmpdir):
self.remove_auto_open_files('erfa', 'iers_leap_second_auto_url',
'ietf_leap_second_auto_url')
fake_file = make_fake_file('28 June 2010', tmpdir)
with iers.conf.set_temp('system_leap_second_file', fake_file):
# If we try this directly, the built-in file will be found.
ls = iers.LeapSeconds.open()
assert ls.meta['data_url'] == iers.IERS_LEAP_SECOND_FILE
# But if we remove the built-in one, the expired one will be
# used and we get a warning that it is stale.
self.remove_auto_open_files(iers.IERS_LEAP_SECOND_FILE)
with pytest.warns(iers.IERSStaleWarning):
ls2 = iers.LeapSeconds.open()
assert ls2.meta['data_url'] == fake_file
assert ls2.expires == Time('2010-06-28', scale='tai')
@pytest.mark.skipif(not os.path.isfile(SYSTEM_FILE),
reason=f'system does not have {SYSTEM_FILE}')
def test_system_file_used_if_not_expired(self, tmpdir):
# We skip the test if the system file is on a CI and is expired -
# we should not depend on CI keeping it up to date, but if it is,
# we should check that it is used if possible.
        if iers.LeapSeconds.open(SYSTEM_FILE).expires <= self.good_enough:
pytest.skip("System leap second file is expired.")
self.remove_auto_open_files('erfa')
with iers.conf.set_temp('system_leap_second_file', SYSTEM_FILE):
ls = iers.LeapSeconds.open()
assert ls.expires > self.good_enough
assert ls.meta['data_url'] in (iers.IERS_LEAP_SECOND_FILE,
SYSTEM_FILE)
# Also check with a "built-in" file that is expired
fake_file = make_fake_file('28 June 2017', tmpdir)
iers.LeapSeconds._auto_open_files[0] = fake_file
ls2 = iers.LeapSeconds.open()
assert ls2.expires > Time.now()
assert ls2.meta['data_url'] == SYSTEM_FILE
@pytest.mark.remote_data
def test_auto_open_urls_always_good_enough(self):
# Avoid using the erfa, built-in and system files, as they might
# be good enough already.
try:
# Need auto_download so that IERS_B won't be loaded and
# cause tests to fail.
iers.conf.auto_download = True
self.remove_auto_open_files('erfa', iers.IERS_LEAP_SECOND_FILE,
'system_leap_second_file')
ls = iers.LeapSeconds.open()
assert ls.expires > self.good_enough
assert ls.meta['data_url'].startswith('http')
finally:
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class ERFALeapSecondsSafe:
"""Base class for tests that change the ERFA leap-second tables.
It ensures the original state is restored.
"""
def setup(self):
# Keep current leap-second table and expiration.
self.erfa_ls = self._erfa_ls = erfa.leap_seconds.get()
self.erfa_expires = self._expires = erfa.leap_seconds._expires
def teardown(self):
# Restore leap-second table and expiration.
erfa.leap_seconds.set(self.erfa_ls)
erfa.leap_seconds._expires = self._expires
class TestFromERFA(ERFALeapSecondsSafe):
def test_get_erfa_ls(self):
ls = iers.LeapSeconds.from_erfa()
assert ls.colnames == ['year', 'month', 'tai_utc']
assert isinstance(ls.expires, Time)
assert ls.expires == self.erfa_expires
ls_array = np.array(ls['year', 'month', 'tai_utc'])
assert np.all(ls_array == self.erfa_ls)
def test_get_built_in_erfa_ls(self):
ls = iers.LeapSeconds.from_erfa(built_in=True)
assert ls.colnames == ['year', 'month', 'tai_utc']
assert isinstance(ls.expires, Time)
ls_array = np.array(ls['year', 'month', 'tai_utc'])
assert np.all(ls_array == self.erfa_ls[:len(ls_array)])
def test_get_modified_erfa_ls(self):
erfa.leap_seconds.set(self.erfa_ls[:-10])
ls = iers.LeapSeconds.from_erfa()
assert len(ls) == len(self.erfa_ls)-10
ls_array = np.array(ls['year', 'month', 'tai_utc'])
assert np.all(ls_array == self.erfa_ls[:-10])
ls2 = iers.LeapSeconds.from_erfa(built_in=True)
assert len(ls2) > len(ls)
erfa.leap_seconds.set(None)
erfa_built_in = erfa.leap_seconds.get()
assert len(ls2) == len(erfa_built_in)
ls2_array = np.array(ls2['year', 'month', 'tai_utc'])
assert np.all(ls2_array == erfa_built_in)
def test_open(self):
ls = iers.LeapSeconds.open('erfa')
assert isinstance(ls.expires, Time)
assert ls.expires == self.erfa_expires
ls_array = np.array(ls['year', 'month', 'tai_utc'])
assert np.all(ls_array == self.erfa_ls)
class TestUpdateLeapSeconds(ERFALeapSecondsSafe):
def setup(self):
super().setup()
# Read default leap second table.
self.ls = iers.LeapSeconds.from_iers_leap_seconds()
# For tests, reset ERFA table to built-in default.
erfa.leap_seconds.set()
self.erfa_ls = erfa.leap_seconds.get()
def test_built_in_up_to_date(self):
"""Leap second should match between built-in and ERFA."""
erfa_since_1970 = self.erfa_ls[self.erfa_ls['year'] > 1970]
assert len(self.ls) >= len(erfa_since_1970), \
"built-in leap seconds out of date"
assert len(self.ls) <= len(erfa_since_1970), \
"ERFA leap seconds out of date"
overlap = np.array(self.ls['year', 'month', 'tai_utc'])
assert np.all(overlap == erfa_since_1970.astype(overlap.dtype))
def test_update_with_built_in(self):
"""An update with built-in should not do anything."""
n_update = self.ls.update_erfa_leap_seconds()
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert np.all(new_erfa_ls == self.erfa_ls)
@pytest.mark.parametrize('n_short', (1, 3))
def test_update(self, n_short):
"""Check whether we can recover removed leap seconds."""
erfa.leap_seconds.set(self.erfa_ls[:-n_short])
n_update = self.ls.update_erfa_leap_seconds()
assert n_update == n_short
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
# Check that a second update does not do anything.
n_update2 = self.ls.update_erfa_leap_seconds()
assert n_update2 == 0
new_erfa_ls2 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls2, self.erfa_ls)
def test_update_initialize_erfa(self):
# With pre-initialization, update does nothing.
erfa.leap_seconds.set(self.erfa_ls[:-2])
n_update = self.ls.update_erfa_leap_seconds(initialize_erfa=True)
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
def test_update_overwrite(self):
n_update = self.ls.update_erfa_leap_seconds(initialize_erfa='empty')
assert n_update == len(self.ls)
new_erfa_ls = erfa.leap_seconds.get()
assert new_erfa_ls['year'].min() > 1970
n_update2 = self.ls.update_erfa_leap_seconds()
assert n_update2 == 0
new_erfa_ls2 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls2, new_erfa_ls)
n_update3 = self.ls.update_erfa_leap_seconds(initialize_erfa=True)
assert n_update3 == 0
new_erfa_ls3 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls3, self.erfa_ls)
def test_bad_jump(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad['tai_utc'][-1] = 5
with pytest.raises(ValueError, match='jump'):
bad.update_erfa_leap_seconds()
# With an error the ERFA table should not change.
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls[:-2])
# Unless we initialized it beforehand.
with pytest.raises(ValueError, match='jump'):
bad.update_erfa_leap_seconds(initialize_erfa=True)
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls)
# Of course, we get no errors if we initialize only.
erfa.leap_seconds.set(self.erfa_ls[:-2])
n_update = bad.update_erfa_leap_seconds(initialize_erfa='only')
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
def test_bad_day(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad['day'][-1] = 5
with pytest.raises(ValueError, match='not on 1st'):
bad.update_erfa_leap_seconds()
def test_bad_month(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad['month'][-1] = 5
with pytest.raises(ValueError, match='January'):
bad.update_erfa_leap_seconds()
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls[:-2])
|
4df3c29ee49fad367953d9627b0e1ac1883f29b6610b4f0eb96f27d1329ded29 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from pathlib import Path
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
CI = os.environ.get('CI', False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join('data', 'iers_a_excerpt'))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize('iers_cls', (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
        # Test the IERS A reader. It is also a regression test that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif('not HAS_IERS_A')
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-02-30-test'))
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-04-30-test'))
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto predictive '
'values are older') as warns, \
pytest.raises(ValueError, match='interpolating from IERS_Auto '
'using predictive values'):
dat.ut1_utc(Time(60000, format='mjd').jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto '
'predictive values are older') as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
dat.ut1_utc(Time(60000, format='mjd').jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A], B[name][i_B], rtol=1e-15,
err_msg=("Bug #9206 IERS B parameter {} not copied over "
"correctly to IERS Auto".format(name)))
# Issue with FTP, rework test into previous one when it's fixed
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert 'UT1_UTC' in iersb_tab.colnames
finally:
iers.IERS_B.close()
|
f359458eb1276054b7ff9688522e13e721f08790f0137c318d44c56eb87f264e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.utils.xml.iterparser import _fast_iterparse
# SYSTEM
import io
import zlib
# The C-based XML parser for VOTables previously used fixed-sized
# buffers (allocated at __init__() time). This test will
# only pass with the patch that allows a dynamic realloc() of
# the queue. This addresses the bugs:
#
# - "RuntimeError: XML queue overflow"
# https://github.com/astropy/astropy/issues/5824
# (Kudos to Stefan Becker---ARI/ZAH Heidelberg)
#
# - "iterparse.c: add queue_realloc() + move 'buffersize / 2' logic there"
# https://github.com/astropy/astropy/issues/5869
#
# This test code can emulate a combination of network buffering and
# gzip decompression---with different request sizes, it can be used to
# demonstrate both under-reading and over-reading.
#
# Using the 512-tag VOTABLE XML sample input, and various combinations
# of minimum/maximum fetch sizes, the following situations can be
# generated:
#
# maximum_fetch = 1    (ValueError, no element found) still within gzip headers
# maximum_fetch = 80   (ValueError, unclosed token) short read
# maximum_fetch = 217  passes, because decompressed_length > requested
#                      && <512 tags in a single parse
# maximum_fetch = 218  (RuntimeError, XML queue overflow)
#
# The test provided here covers the over-reading identified in #5824
# (equivalent to the maximum_fetch = 217 case).
# Firstly, assemble a minimal VOTABLE header, table contents and footer.
# This is done in textual form, as the aim is to only test the parser, not
# the outputter!
HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<VOTABLE>
<RESOURCE type="results">
<TABLE>
<FIELD ID="foo" name="foo" datatype="int" arraysize="1"/>
<DATA>
<TABLEDATA>
"""
ROW = """<TR><TD>0</TD></TR>
"""
FOOTER = """
</TABLEDATA>
</DATA>
</TABLE>
</RESOURCE>
</VOTABLE>
"""
# minimum passable buffer size => 1024
# 1024 / 2 => 512 tags for overflow
# 512 - 7 tags in header, - 5 tags in footer = 500 tags required for overflow
# 500 / 4 tags (<tr><td></td></tr>) per row == 125 rows required for overflow
VOTABLE_XML = HEADER + 125*ROW + FOOTER
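# A quick, illustrative sanity check of the arithmetic above:
#     assert VOTABLE_XML.count('<TR>') == 125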
# UngzipFileWrapper() wraps an existing file-like Object,
# decompressing the content and returning the plaintext.
# This therefore emulates the behavior of the Python 'requests'
# library when transparently decompressing Gzip HTTP responses.
#
# The critical behavior is that---because of the
# decompression---read() can return considerably more
# bytes than were requested! (But, read() can also return less).
#
# inspiration:
# http://stackoverflow.com/questions/4013843/how-to-wrap-file-object-read-and-write-operation-which-are-readonly
class UngzipFileWrapper:
def __init__(self, fd, **kwargs):
self._file = fd
self._z = zlib.decompressobj(16 + zlib.MAX_WBITS)
def read(self, requested_length):
# emulate network buffering dynamics by clamping the read size
clamped_length = max(1, min(1 << 24, requested_length))
compressed = self._file.read(clamped_length)
plaintext = self._z.decompress(compressed)
# Only for real local files---just for the testcase
if len(compressed) == 0:
self.close()
return plaintext
def __getattr__(self, attr):
return getattr(self._file, attr)
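# Illustrative usage (a sketch; ``compressed_bytes`` is a hypothetical
# gzip-compressed payload, and the real round-trip is built in the test below):
#
#     fd = io.BytesIO(compressed_bytes)
#     wrapper = UngzipFileWrapper(fd)
#     chunk = wrapper.read(64)   # may return more (or fewer) than 64 bytes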
# test_iterparser_over_read_simple() is a very cut-down version of the
# original, more flexible test case, but without external
# dependencies. The plaintext is compressed and then decompressed
# to provide a better emulation of the original situation where
# the bug was observed.
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer
# that was requested.
def test_iterparser_over_read_simple():
    # Take the plaintext of 512 tags, and compress it with a
# Gzip-style header (+16), to most closely emulate the behavior
# of most HTTP servers.
zlib_GZIP_STYLE_HEADER = 16
compo = zlib.compressobj(zlib.Z_BEST_COMPRESSION,
zlib.DEFLATED,
zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER)
# Bytes vs. String .encode()/.decode() for compatibility with Python 3.5.
s = compo.compress(VOTABLE_XML.encode())
s = s + compo.flush()
fd = io.BytesIO(s)
fd.seek(0)
# Finally setup the test of the C-based '_fast_iterparse()' iterator
# and a situation in which it can be called a-la the VOTable Parser.
MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
uncompressed_fd = UngzipFileWrapper(fd)
iterable = _fast_iterparse(uncompressed_fd.read,
MINIMUM_REQUESTABLE_BUFFER_SIZE)
list(iterable)
|
04fe07719ea5661e9f2246374f6ebb7caaa176a6ddf6a5e381c0ea7bc3abb0d2 | from astropy.utils.data import get_pkg_data_filename
def get_data_filename():
return get_pkg_data_filename('data/foo.txt')
|
5b41a12ce9df8e8adb1bbc5ee36a224f1f4dc0447286953b0874a134226a2965 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.table import QTable, hstack, vstack, join
from astropy.utils.masked import Masked
from astropy.utils.compat.optional_deps import HAS_H5PY
from .test_masked import assert_masked_equal
FILE_FORMATS = ['ecsv', 'fits']
if HAS_H5PY:
FILE_FORMATS.append('h5')
class MaskedArrayTableSetup:
@classmethod
def setup_arrays(self):
self.a = np.array([3., 5., 0.])
self.mask_a = np.array([True, False, False])
@classmethod
def setup_class(self):
self.setup_arrays()
self.ma = Masked(self.a, mask=self.mask_a)
self.ma.info.format = '.1f'
self.t = QTable([self.ma], names=['ma'])
class MaskedQuantityTableSetup(MaskedArrayTableSetup):
@classmethod
def setup_arrays(self):
self.a = np.array([3., 5., 0.]) << u.m
self.mask_a = np.array([True, False, False])
class TestMaskedArrayTable(MaskedArrayTableSetup):
def test_table_initialization(self):
assert_array_equal(self.t['ma'].unmasked, self.a)
assert_array_equal(self.t['ma'].mask, self.mask_a)
assert repr(self.t).splitlines()[-3:] == [
' ———',
' 5.0',
' 0.0']
def test_info_basics(self):
assert self.t['ma'].info.name == 'ma'
assert 'serialize_method' in self.t['ma'].info.attr_names
t2 = self.t.copy()
t2['ma'].info.format = '.2f'
t2['ma'].info.serialize_method['fits'] = 'nonsense'
assert repr(t2).splitlines()[-3:] == [
' ———',
' 5.00',
' 0.00']
# Check that if we slice, things get copied over correctly.
t3 = t2[:2]
assert t3['ma'].info.name == 'ma'
assert t3['ma'].info.format == '.2f'
assert 'serialize_method' in t3['ma'].info.attr_names
assert t3['ma'].info.serialize_method['fits'] == 'nonsense'
@pytest.mark.parametrize('file_format', FILE_FORMATS)
def test_table_write(self, file_format, tmpdir):
name = str(tmpdir.join(f"a.{file_format}"))
kwargs = {}
if file_format == 'h5':
kwargs['path'] = 'trial'
kwargs['serialize_meta'] = True
self.t.write(name, **kwargs)
t2 = QTable.read(name)
assert isinstance(t2['ma'], self.ma.__class__)
assert np.all(t2['ma'] == self.ma)
assert np.all(t2['ma'].mask == self.mask_a)
if file_format == 'fits':
# Imperfect roundtrip through FITS native format description.
assert self.t['ma'].info.format in t2['ma'].info.format
else:
assert t2['ma'].info.format == self.t['ma'].info.format
@pytest.mark.parametrize('serialize_method', ['data_mask', 'null_value'])
def test_table_write_serialization(self, serialize_method, tmpdir):
name = str(tmpdir.join("test.ecsv"))
self.t.write(name, serialize_method=serialize_method)
with open(name) as fh:
lines = fh.readlines()
t2 = QTable.read(name)
assert isinstance(t2['ma'], self.ma.__class__)
if serialize_method == 'data_mask':
            # With data_mask, we have data and mask columns and should
            # exactly round-trip.
assert len(lines[-1].split()) == 2
assert_masked_equal(t2['ma'], self.ma)
else:
# With null_value we have just a data column with null values
# marked, so we lost information on the data below the mask.
assert len(lines[-1].split()) == 1
assert np.all(t2['ma'] == self.ma)
assert np.all(t2['ma'].mask == self.mask_a)
def test_non_existing_serialize_method(self, tmpdir):
name = str(tmpdir.join('bad.ecsv'))
with pytest.raises(ValueError, match='serialize method must be'):
self.t.write(name, serialize_method='bad_serialize_method')
class TestMaskedQuantityTable(TestMaskedArrayTable, MaskedQuantityTableSetup):
# Runs tests from TestMaskedArrayTable as well as some extra ones.
def test_table_operations_requiring_masking(self):
t1 = self.t
t2 = QTable({'ma2': Masked([1, 2] * u.m)})
t12 = hstack([t1, t2], join_type='outer')
assert np.all(t12['ma'].mask == [True, False, False])
        # 'ma2' is one row shorter, so hstack pads it with a masked element.
assert np.all(t12['ma2'].mask == [False, False, True])
t12 = hstack([t1, t2], join_type='inner')
assert np.all(t12['ma'].mask == [True, False])
assert np.all(t12['ma2'].mask == [False, False])
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type='outer')
# ma ma2
# m m
# --- ---
# —— ——
# 5.0 ——
# 0.0 ——
# —— 1.0
# —— 2.0
assert np.all(t12['ma'].mask == [True, False, False, True, True])
assert np.all(t12['ma2'].mask == [True, True, True, False, False])
def test_table_operations_requiring_masking_auto_promote(self):
MaskedQuantity = Masked(u.Quantity)
t1 = QTable({'ma1': [1, 2] * u.m})
t2 = QTable({'ma2': [3, 4, 5] * u.m})
t12 = hstack([t1, t2], join_type='outer')
assert isinstance(t12['ma1'], MaskedQuantity)
assert np.all(t12['ma1'].mask == [False, False, True])
assert np.all(t12['ma1'] == [1, 2, 0] * u.m)
assert not isinstance(t12['ma2'], MaskedQuantity)
assert isinstance(t12['ma2'], u.Quantity)
assert np.all(t12['ma2'] == [3, 4, 5] * u.m)
t12 = hstack([t1, t2], join_type='inner')
assert isinstance(t12['ma1'], u.Quantity)
assert not isinstance(t12['ma1'], MaskedQuantity)
assert isinstance(t12['ma2'], u.Quantity)
assert not isinstance(t12['ma2'], MaskedQuantity)
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type='outer')
assert np.all(t12['ma1'].mask == [False, False, True, True, True])
assert np.all(t12['ma2'].mask == [True, True, False, False, False])
t1['a'] = [1, 2]
t2['a'] = [1, 3, 4]
t12 = join(t1, t2, join_type='outer')
assert np.all(t12['ma1'].mask == [False, False, True, True])
assert np.all(t12['ma2'].mask == [False, True, False, False])
|
5a4182ad29daf3349750eb10e32941282a36ddda99eb0ced980ac24c8d91bcd5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test numpy functions and ufuncs on Masked arrays and quantities.
The tests here are fairly detailed but do not aim for complete
coverage. Complete coverage of all numpy functions is done
with less detailed tests in test_function_helpers.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.masked.core import Masked
from .test_masked import (MaskedArraySetup, QuantitySetup, LongitudeSetup,
assert_masked_equal)
class MaskedUfuncTests(MaskedArraySetup):
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_2op_ufunc(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_ufunc_inplace(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
out = Masked(np.zeros_like(ma_mb.unmasked))
result = ufunc(self.ma, self.mb, out=out)
assert result is out
assert_masked_equal(result, ma_mb)
def test_ufunc_inplace_no_masked_input(self):
a_b = np.add(self.a, self.b)
out = Masked(np.zeros_like(a_b))
result = np.add(self.a, self.b, out=out)
assert result is out
assert_array_equal(result.unmasked, a_b)
assert_array_equal(result.mask, np.zeros(a_b.shape, bool))
def test_ufunc_inplace_error(self):
out = np.zeros(self.ma.shape)
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
@pytest.mark.parametrize('ufunc', (np.add.outer, np.minimum.outer))
def test_2op_ufunc_outer(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
def test_3op_ufunc(self):
ma_mb = np.clip(self.ma, self.b, self.c)
expected_data = np.clip(self.a, self.b, self.c)
expected_mask = self.mask_a
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_add_reduce(self, axis):
ma_reduce = np.add.reduce(self.ma, axis=axis)
expected_data = np.add.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
out = Masked(np.zeros_like(ma_reduce.unmasked),
np.ones_like(ma_reduce.mask))
ma_reduce2 = np.add.reduce(self.ma, axis=axis, out=out)
assert ma_reduce2 is out
assert_masked_equal(ma_reduce2, ma_reduce)
def test_add_reduce_no_masked_input(self):
a_reduce = np.add.reduce(self.a, axis=0)
out = Masked(np.zeros_like(a_reduce), np.ones(a_reduce.shape, bool))
result = np.add.reduce(self.a, axis=0, out=out)
assert result is out
assert_array_equal(out.unmasked, a_reduce)
assert_array_equal(out.mask, np.zeros(a_reduce.shape, bool))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_minimum_reduce(self, axis):
ma_reduce = np.minimum.reduce(self.ma, axis=axis)
expected_data = np.minimum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_maximum_reduce(self, axis):
ma_reduce = np.maximum.reduce(self.ma, axis=axis)
expected_data = np.maximum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
class TestMaskedArrayUfuncs(MaskedUfuncTests):
# multiply.reduce does not work with units, so test only for plain array.
@pytest.mark.parametrize('axis', (0, 1, None))
def test_multiply_reduce(self, axis):
ma_reduce = np.multiply.reduce(self.ma, axis=axis)
expected_data = np.multiply.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
def test_ufunc_not_implemented_for_other(self):
"""
If the unmasked operation returns NotImplemented, this
should lead to a TypeError also for the masked version.
"""
a = np.array([1, 2])
b = 3 * u.m
with pytest.raises(TypeError):
a & b
ma = Masked(a)
with pytest.raises(TypeError):
ma & b
class TestMaskedQuantityUfuncs(MaskedUfuncTests, QuantitySetup):
def test_ufunc_inplace_error2(self):
out = Masked(np.zeros(self.ma.shape))
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
class TestMaskedLongitudeUfuncs(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result = np.add(self.ma, self.mb, out=out)
assert result is out
expected = np.add(self.ma, self.mb).view(Quantity)
assert_masked_equal(result, expected)
class TestMaskedArrayConcatenation(MaskedArraySetup):
def test_concatenate(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.ma, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((self.mask_a, self.mask_b[np.newaxis]),
axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
def test_concatenate_not_all_masked(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.a, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((np.zeros(self.a.shape, bool),
self.mask_b[np.newaxis]), axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
@pytest.mark.parametrize('obj', (1, slice(2, 3)))
def test_insert(self, obj):
mc_in_a = np.insert(self.ma, obj, self.mc, axis=-1)
expected = Masked(np.insert(self.a, obj, self.c, axis=-1),
np.insert(self.mask_a, obj, self.mask_c, axis=-1))
assert_masked_equal(mc_in_a, expected)
def test_insert_masked_obj(self):
with pytest.raises(TypeError):
np.insert(self.ma, Masked(1, mask=False), self.mc, axis=-1)
def test_append(self):
mc_to_a = np.append(self.ma, self.mc, axis=-1)
expected = Masked(np.append(self.a, self.c, axis=-1),
np.append(self.mask_a, self.mask_c, axis=-1))
assert_masked_equal(mc_to_a, expected)
class TestMaskedQuantityConcatenation(TestMaskedArrayConcatenation,
QuantitySetup):
pass
class TestMaskedLongitudeConcatenation(TestMaskedArrayConcatenation,
LongitudeSetup):
pass
class TestMaskedArrayBroadcast(MaskedArraySetup):
def test_broadcast_to(self):
shape = self.ma.shape
ba = np.broadcast_to(self.mb, shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True))
assert_masked_equal(ba, expected)
def test_broadcast_to_using_apply(self):
# Partially just to ensure we cover the relevant part of _apply.
shape = self.ma.shape
ba = self.mb._apply(np.broadcast_to, shape=shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True))
assert_masked_equal(ba, expected)
def test_broadcast_arrays(self):
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=True)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=True)
bm = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c)
for mb_, b_, bm_ in zip(mb, b, bm):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, bm_)
def test_broadcast_arrays_not_all_masked(self):
mb = np.broadcast_arrays(self.a, self.mb, self.c, subok=True)
assert_array_equal(mb[0], self.a)
expected1 = np.broadcast_to(self.mb, self.a.shape, subok=True)
assert_masked_equal(mb[1], expected1)
expected2 = np.broadcast_to(self.c, self.a.shape, subok=True)
assert_array_equal(mb[2], expected2)
def test_broadcast_arrays_subok_false(self):
# subok affects ndarray subclasses but not masking itself.
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=False)
assert all(type(mb_.unmasked) is np.ndarray for mb_ in mb)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=False)
mask_b = np.broadcast_arrays(self.mask_a, self.mask_b,
self.mask_c, subok=False)
for mb_, b_, mask_ in zip(mb, b, mask_b):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, mask_)
class TestMaskedQuantityBroadcast(TestMaskedArrayBroadcast, QuantitySetup):
pass
class TestMaskedLongitudeBroadcast(TestMaskedArrayBroadcast, LongitudeSetup):
pass
class TestMaskedArrayCalculation(MaskedArraySetup):
@pytest.mark.parametrize('n,axis', [(1, -1), (2, -1), (1, 0)])
def test_diff(self, n, axis):
mda = np.diff(self.ma, n=n, axis=axis)
expected_data = np.diff(self.a, n, axis)
        nan_mask = np.zeros(self.a.shape)
nan_mask[self.ma.mask] = np.nan
expected_mask = np.isnan(np.diff(nan_mask, n=n, axis=axis))
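        # NaN propagation through np.diff mimics the expected mask
        # propagation: any output that depends on a masked input is masked.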
assert_array_equal(mda.unmasked, expected_data)
assert_array_equal(mda.mask, expected_mask)
def test_diff_explicit(self):
ma = Masked(np.arange(8.),
[True, False, False, False, False, True, False, False])
mda = np.diff(ma)
assert np.all(mda.unmasked == 1.)
assert np.all(mda.mask ==
[True, False, False, False, True, True, False])
mda = np.diff(ma, n=2)
assert np.all(mda.unmasked == 0.)
assert np.all(mda.mask == [True, False, False, True, True, True])
class TestMaskedQuantityCalculation(TestMaskedArrayCalculation, QuantitySetup):
pass
class TestMaskedLongitudeCalculation(TestMaskedArrayCalculation,
LongitudeSetup):
pass
class TestMaskedArraySorting(MaskedArraySetup):
@pytest.mark.parametrize('axis', [-1, 0])
def test_lexsort1(self, axis):
ma_lexsort = np.lexsort((self.ma,), axis=axis)
filled = self.a.copy()
filled[self.mask_a] = 9e9
expected_data = filled.argsort(axis)
assert_array_equal(ma_lexsort, expected_data)
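        # I.e., masked entries sort as if filled with a large value (9e9
        # here), so they end up last along the sorted axis.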
@pytest.mark.parametrize('axis', [-1, 0])
def test_lexsort2(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.ma, mb), axis=axis)
filled_a = self.ma.filled(9e9)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((filled_a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.ma), axis=axis)
expected_ba = np.lexsort((filled_b, filled_a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.ma], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
@pytest.mark.parametrize('axis', [-1, 0])
def test_lexsort_mix(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.a, mb), axis=axis)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((self.a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.a), axis=axis)
expected_ba = np.lexsort((filled_b, self.a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.a], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
|
f1f81bca223387ea5c55c64831bb4ad2ef25445e75cd24dfbc958a779882cbf1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import Longitude
from astropy.utils.masked import Masked, MaskedNDArray
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [
(1, 1),
slice(None, 1),
(),
1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.array([-3., -2., -1.])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([('a', 'f8'), ('b', 'f8')])
self.mask_sdt = np.dtype([('a', '?'), ('b', '?')])
self.sa = np.array([[(1., 2.), (3., 4.)],
[(11., 12.), (13., 14.)]], dtype=self.sdt)
self.mask_sa = np.array([[(True, True), (False, False)],
[(False, True), (True, False)]],
dtype=self.mask_sdt)
self.sb = np.array([(1., 2.), (-3., 4.)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)],
dtype=self.mask_sdt)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
        # Note: Longitude does not work on structured arrays, so we leave
        # those as regular arrays (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match='cannot handle np.ma.masked'):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1., 2.]).view(my_array)
with pytest.raises(AttributeError, match='unmasked'):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
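# Summary of the factory behavior verified above: calling Masked on a class
# returns (and caches) the corresponding masked subclass, e.g.
# Masked(list) is MaskedList, while calling it on an instance wraps that
# instance in the appropriate masked class.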
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1., 2.]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == 'MaskedMyArray'
assert mcls.__doc__.startswith('Masked version of MyArray')
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1., 2.], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1., 2.], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1., 2.])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask['a'].any() and not ma.mask['b'].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask['a'].all() and ma.mask['b'].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array(
[[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt))
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view('c8')
assert_array_equal(ma2.unmasked, self.a.view('c8'))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize('new_dtype', ['2f4', 'f8,f8,f8'])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
# But cannot (yet) view in way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match='different.*size'):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
            # Use intp to ensure we have the same size as object;
            # otherwise we get a different error message.
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize('fill_value', (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, 'unit', 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match='missing 1 required'):
self.ma.filled()
@pytest.mark.parametrize('fill_value', [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, 'unit'):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected['a'][self.msa.mask['a']] = fill_value['a']
expected['b'][self.msa.mask['b']] = fill_value['b']
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all((ma.unmasked == a and ma.mask == m) for (ma, a, m)
in zip(self.ma.flat, self.a.flat, self.mask_a.flat))
        # Check that flat works like a view of the real array.
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = 6,
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match='cannot reshape'):
ma.shape = 5,
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.], [2.]], self.a.shape),
mask=self.mask_a)
with pytest.raises(AttributeError, match='ncompatible shape'):
ma2.shape = 6,
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(self.a.copy(), mask=np.broadcast_to([[True], [False]],
self.mask_a.shape))
with pytest.raises(AttributeError, match='ncompatible shape'):
ma3.shape = 6,
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('indices,axis', [
([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)])
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa['b'] if item == 'a' else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
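    # Semantics checked above: assigning np.ma.masked leaves the underlying
    # data untouched and only sets the corresponding mask entries to True.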
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(['aa', 'bb'])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize('op', (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > 'abc'
@pytest.mark.parametrize('different_names', [False, True])
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
            msb = msb.astype([(f'different_{name}', dt[0])
                              for name, dt in msb.dtype.fields.items()])
        mapmb = op(self.msa, msb)
        # The expected result is a bit tricky here: only unmasked fields count.
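        # E.g., a field that is masked in either operand compares as equal,
        # and the result element is masked only if, for every field, at
        # least one of the two operands is masked.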
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
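        # Mask-propagation rule encoded in the expectations below: for L @ R,
        # output element (i, j) is masked whenever any entry of row i of L
        # or of column j of R is masked, i.e., the logical_or.outer of the
        # operands' "any" masks along the contracted axis.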
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(['a', 'b', 'c']), mask=[True, False, False])
m2 = Masked(np.array(['a', 'b', 'd']), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(['a', 'b']) > object()
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_mean(self, axis):
ma_mean = self.ma.mean(axis)
filled = self.a.copy()
filled[self.mask_a] = 0.
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
def test_mean_int16(self):
ma = self.ma.astype('i2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f8'
expected = ma.astype('f8').mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype('f2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f2'
expected = self.ma.mean().astype('f2')
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_var(self, axis):
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True))**2
filled[self.mask_a] = 0.
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype('i2')
ma_var = ma.var()
assert ma_var.dtype == 'f8'
expected = ma.astype('f8').var()
assert_masked_equal(ma_var, expected)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_min(self, axis):
ma_min = self.ma.min(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3., np.nan, 2.], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.))
assert not ma_min.mask
@pytest.mark.parametrize('axis', (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argsort(self, axis):
ma_argsort = self.ma.argsort(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize('order', [None, 'a', ('a', 'b'), ('b', 'a')])
@pytest.mark.parametrize('axis', [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf),
dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match='when the array has no fields'):
self.ma.argsort(axis=0, order='a')
@pytest.mark.parametrize('axis', (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize('kth', [1, 3])
def test_argpartition(self, kth):
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize('kth', [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array([[1., 2.],
[3., 4.]])
a2 = np.array([[1., 0.],
[3., 4.]])
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(a1, mask=[[False, False],
[True, True]])
ma2 = Masked(a2, mask=[[False, True],
[False, True]])
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(ma1_eq_ma2.unmasked, np.array([[True, False],
[True, True]]))
assert_array_equal(ma1_eq_ma2.mask, np.array([[False, True],
[True, True]]))
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
        assert bool(ma_eq0[0]) is True
        assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize('method', ['any', 'all'])
@pytest.mark.parametrize('array,axis', [
('a', 0), ('a', 1), ('a', None),
('b', None),
('c', 0), ('c', 1), ('c', None)])
def test_all_and_any(self, array, axis, method):
ma = getattr(self, 'm'+array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == 'all'
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
# interpretation as bool
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.parametrize('offset', (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(self.a.diagonal(offset=offset),
self.mask_a.diagonal(offset=offset))
assert_masked_equal(mda, expected)
@pytest.mark.parametrize('offset', (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
expected = Masked(self.a.trace(offset=offset),
self.mask_a.trace(offset=offset, dtype=bool))
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(self.a.clip(self.mb.filled(dmin),
self.mc.filled(dmax)),
mask=self.mask_a)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
    # These cannot work on Quantity (the result unit would depend on the
    # number of elements multiplied), so they are tested separately here.
@pytest.mark.parametrize('axis', (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
def test_masked_str_explicit():
sa = np.array([(1., 2.), (3., 4.)], dtype='f8,f8')
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode='fixed'):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1., 2.), (3., 4.)], dtype='>f8,>f8')
msa = Masked(sa, [(False, True), (False, False)])
assert repr(msa) == ("MaskedNDArray([(1., ——), (3., 4.)], "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[0]) == ("MaskedNDArray((1., ——), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[1]) == ("MaskedNDArray((3., 4.), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
def test_masked_repr_summary():
ma = Masked(np.arange(15.), mask=[True]+[False]*14)
with np.printoptions(threshold=2):
assert repr(ma) == (
"MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])")
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
        # Very blunt check that they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa['a'])
assert_array_equal(self.mra.a.mask, self.mask_sa['a'])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa['b']
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa['a'])
assert_array_equal(mra_a.mask, self.mask_sa['a'])
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa['b'])
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup):
pass
|
6ca4e21f5d0c2c6787d2550b512651b828293cebf2e284b3ea4079c4c1df2f87 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, representation as r
from astropy.time import Time
from astropy.utils.masked import Masked
class TestRepresentations:
def setup_class(self):
self.x = np.array([3., 5., 0.]) << u.m
self.y = np.array([4., 12., 1.]) << u.m
self.z = np.array([0., 0., 1.]) << u.m
self.c = r.CartesianRepresentation(self.x, self.y, self.z)
self.mask = np.array([False, False, True])
self.mx = Masked(self.x, self.mask)
self.my = Masked(self.y, self.mask)
self.mz = Masked(self.z, self.mask)
self.mc = r.CartesianRepresentation(self.mx, self.my, self.mz)
def test_initialization(self):
check = self.mc.z == self.mz
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.mc.x, self.mx)
assert_array_equal(self.mc.y, self.my)
assert_array_equal(self.mc.z, self.mz)
def test_norm(self):
        # norm() needs stacking and the erfa ufunc override.
norm = self.mc.norm()
assert_array_equal(norm.unmasked, self.c.norm())
assert_array_equal(norm.mask, self.mask)
def test_transformation(self):
msr = self.mc.represent_as(r.SphericalRepresentation)
sr = self.c.represent_as(r.SphericalRepresentation)
for comp in msr.components:
mc = getattr(msr, comp)
c = getattr(sr, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
# Transformation back. This also tests erfa.ufunc.s2p, which
# is special in having a core dimension only in the output.
cr2 = sr.represent_as(r.CartesianRepresentation)
mcr2 = msr.represent_as(r.CartesianRepresentation)
for comp in mcr2.components:
mc = getattr(mcr2, comp)
c = getattr(cr2, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
class TestSkyCoord:
def setup_class(self):
self.ra = np.array([3., 5., 0.]) << u.hourangle
self.dec = np.array([4., 12., 1.]) << u.deg
self.sc = SkyCoord(self.ra, self.dec)
self.mask = np.array([False, False, True])
self.mra = Masked(self.ra, self.mask)
self.mdec = Masked(self.dec, self.mask)
self.msc = SkyCoord(self.mra, self.mdec)
def test_initialization(self):
check = self.msc.dec == self.mdec
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.msc.data.lon, self.mra)
assert_array_equal(self.msc.data.lat, self.mdec)
def test_transformation(self):
gcrs = self.sc.gcrs
mgcrs = self.msc.gcrs
assert_array_equal(mgcrs.data.lon.mask, self.msc.data.lon.mask)
assert_array_equal(mgcrs.data.lon.unmasked, gcrs.data.lon)
assert_array_equal(mgcrs.data.lat.unmasked, gcrs.data.lat)
class TestTime:
def setup_class(self):
self.s = np.array(['2010-11-12T13:14:15.160',
'2010-11-12T13:14:15.161',
'2011-12-13T14:15:16.170'])
self.t = Time(self.s)
# Time formats will currently strip any ndarray subtypes, so we cannot
# initialize a Time with a Masked version of self.s yet. Instead, we
        # work around it, for now only testing that masked values are
        # preserved by transformations.
self.mask = np.array([False, False, True])
self.mt = self.t._apply(Masked, self.mask)
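        # (_apply maps the given function over Time's internal data arrays,
        # so jd1 and jd2 both come out as Masked -- see the checks below.)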
def test_initialization(self):
assert_array_equal(self.mt.jd1.mask, self.mask)
assert_array_equal(self.mt.jd2.mask, self.mask)
assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)
assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)
@pytest.mark.parametrize('format_', ['jd', 'cxcsec', 'jyear'])
def test_different_formats(self, format_):
# Formats do not yet work with everything; e.g., isot is not supported
# since the Masked class does not yet support structured arrays.
tfmt = getattr(self.t, format_)
mtfmt = getattr(self.mt, format_)
check = mtfmt == tfmt
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
@pytest.mark.parametrize('scale', ['tai', 'tcb', 'ut1'])
def test_transformation(self, scale):
tscl = getattr(self.t, scale)
mtscl = getattr(self.mt, scale)
assert_array_equal(mtscl.jd1.mask, self.mask)
assert_array_equal(mtscl.jd2.mask, self.mask)
assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)
assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)
|
9525988e7c2c352395c1f7ad5952c3df60b9e36b8294c1bf018b9d6537463d5d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
from astropy.units.tests.test_quantity_non_ufuncs import (
get_wrapped_functions)
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
from .test_masked import assert_masked_equal, MaskedArraySetup
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(func(self.a, *args, **kwargs),
mask=func(self.mask_a, *args, **kwargs))
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs))
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.)
@pytest.mark.filterwarnings('ignore:Calling nonzero on 0d arrays is deprecated')
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize('axes', [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in (axes if isinstance(axes, tuple) else (axes,)):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match='not.*correct shape'):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1+2j, 3+4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize('value', [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize('value', [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop('ma_list', [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0., Masked(1., True)],
[Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50., 25.], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50., 25.])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_amax(self):
self.check(np.amax, method='max')
def test_amin(self):
self.check(np.amin, method='min')
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method='any')
def test_alltrue(self):
self.check(np.alltrue, method='all')
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method='prod')
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method='cumprod')
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_, method='round')
def test_around(self):
self.check(np.around, method='round')
def test_clip(self):
self.check(np.clip, 2., 4.)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.)
expected = np.where(mask, self.a, 1000.)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.)
expected2 = np.where(mask, self.a, 1000.)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match='either both or neither'):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize('default', [-1., np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma+1], default=default)
expected = np.select([a < 1.5, a > 3.5], [a, a+1],
default=-1 if default is not np.ma.masked else 0)
expected_mask = np.select([a < 1.5, a > 3.5], [mask_a, mask_a],
default=getattr(default, 'mask', False))
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0., 1.], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3., 4.]]*2)
self.mask_a = np.array([[False]*5, [True]*4+[False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == 'b'
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1. + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1. + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1. + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1. + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
        expected = np.isclose(self.a, self.b, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
        expected = np.isclose(self.ma, self.mb,
                              atol=0.01)[~(self.mask_a | self.mask_b)].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
if not NUMPY_LT_1_19:
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(),
self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a,
self.mask_b).reshape(result.shape)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize('axis', [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # On purpose mask the lower triangle (including the diagonal),
        # so that some rows/columns are fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, 'nan'+function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
if not kwargs.get('axis', 1):
# no need to test for all
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_median(self, axis):
self.check(np.median, axis=axis)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_quantile(self, axis):
self.check(np.quantile, q=[0.25, 0.5], axis=axis)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match='must be in the range'):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.)
mask = np.concatenate([np.ones((2, 1), bool),
self.mask_a,
np.zeros((2, 1), bool)], axis=-1)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack([
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2]], axis=-1)]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1., 7.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10., 3.])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(self.mask_a | self.mask_b,
expected.shape).copy()
# TODO: make implementation that also ensures start point mask is
# determined just by start point? (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if not NUMPY_LT_1_20 and function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.)
fp = np.array([1., 5., 6., 19., 20.])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
        expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.])
expected = np.piecewise(self.a, condlist, [-1, 1.])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(self.ma, condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.),
mask=~x.mask)])
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(self.mask_a, condlist2,
[True, False, lambda x: ~x])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match='with 2 condition'):
np.piecewise(self.ma, condlist2, [])
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True]+[False]*5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(np.array([1+2j, 0+4j, 3+0j, -1-1j]),
mask=[True, False, False, False])
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == '[— 1 2]'
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=', ')
assert out1 == '[—, 1, 2]'
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=', ', formatter={'all': hex})
assert out2 == '[———, 0x1, 0x2]'
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(self.ma, None, None, None, ', ', '',
np._NoValue, {'int': hex})
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=', ', formatter={'float': hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == 'MaskedNDArray([—, 1, 2])'
ma2 = self.ma.astype('f4')
out2 = np.array_repr(ma2)
assert out2 == 'MaskedNDArray([——, 1., 2.], dtype=float32)'
def test_array_str(self):
out = np.array_str(self.ma)
assert out == '[— 1 2]'
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype='u1')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False]*15 + [True, True] + [False]*7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize('axis', [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1., 4.)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10., 3., 4.])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(['2020-12-31', '2021-01-01', '2021-01-02'], dtype='M')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([['2021-01-07'], ['2021-01-31']], dtype='M')
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestNaNFunctions:
    @classmethod
    def setup_class(self):
self.a = np.array([[np.nan, np.nan, 3.],
[4., 5., 6.]])
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True,
**kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_20:
financial_functions = {f for f in all_wrapped_functions.values()
if f in np.lib.financial.__dict__.values()}
untested_functions |= financial_functions
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander
}
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions
| IGNORED_FUNCTIONS
| UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize('one, two', itertools.combinations(
(MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys())), 2))
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (MASKED_SAFE_FUNCTIONS |
UNSUPPORTED_FUNCTIONS |
set(APPLY_TO_BOTH_FUNCTIONS.keys()) |
set(DISPATCHED_FUNCTIONS.keys()))
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.11'
__tabversion__ = '3.10'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
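# A minimal usage sketch (illustrative only, not part of the module; it
# assumes the calling module has already defined `tokens` and the usual
# t_* rules before lex() is invoked):
#
#     lexer = lex()                   # build a Lexer from the caller's rules
#     lexer.input('x = 3 * 4')        # store a new string in the lexer
#     while True:
#         tok = lexer.token()         # next token; None at end of input
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)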
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags)))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
    # token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing,
                # we just move on to the next token.
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
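# Sketch of the two equivalent ways a rule can carry its regex (the TOKEN
# decorator, defined near the end of this module, attaches the pattern as a
# .regex attribute; otherwise the docstring is used):
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t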
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
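# Doctest-style sketch of the underlying idea: every rule becomes a named
# group in a single alternation, so one match() call tries all rules and
# m.lastindex identifies the rule that fired (groupindex maps group names
# back to indices):
#
#     >>> master = re.compile(r'(?P<t_NUMBER>\d+)|(?P<t_PLUS>\+)')
#     >>> m = master.match('42')
#     >>> m.lastindex == master.groupindex['t_NUMBER']
#     True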
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
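    # Example (sketch) of a valid `states` declaration in a lexer module,
    # matching the checks above:
    #
    #     states = (
    #         ('ccode', 'exclusive'),    # only t_ccode_* (and t_ANY_*) rules apply
    #         ('python', 'inclusive'),   # INITIAL rules apply here as well
    #     )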
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                if c.match(''):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
    # Get the module dictionary used for the lexer
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
    # Determine if the module is part of a package or not.
    # If so, fix the lextab setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
    # Collect lexer information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
if lextab in sys.modules:
del sys.modules[lextab]
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
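# A minimal usage sketch (added for illustration; the token names and rules
# are examples defined in a *user* module, not in this one):
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex.lex()          # reflects over the calling module
#     lexer.input('3 + 4')
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value)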
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regular expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
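# Usage sketch (added; names are illustrative): @TOKEN attaches a computed
# regular expression to a rule whose docstring cannot be a literal pattern,
# e.g. when the pattern is built from other strings:
#
#     digit = r'([0-9])'
#     nondigit = r'([_A-Za-z])'
#     identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         # t.value remains the matched text
#         return t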
|
293835dd1e5229e89cc199bb6c83cbe3829c40145c1ec99e1063b020156f2e4a | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT1(t):
r'(/\*(.|\n)*?\*/)'
ncr = t.value.count("\n")
t.lexer.lineno += ncr
# replace with one space or a number of '\n'
t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
return t
# Line comment
def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
    # replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path

# Needed so that Preprocessor() below can fall back to lex.lexer when no
# lexer object is supplied (the __main__ block also relies on this module).
from ply import lex
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
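# Usage sketch (added for illustration):
def _trigraph_example():
    # '??=' -> '#', '??(' -> '[', '??)' -> ']'
    assert trigraph('??=define ARR(x) x??(0??)') == '#define ARR(x) x[0]'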
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
    # Utility function. Given a string of text, tokenize it into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
    # function forms the lowest level of the preprocessor---grouping input text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
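    # Behavior sketch (added): backslash continuations are joined before
    # tokenizing, so an input of three physical lines 'a \', 'b', 'c'
    # yields two token groups: one for the logical line 'a b', one for 'c'.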
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
    # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
    # This function properly handles nested parentheses and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
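    # Behavior sketch (added): for the token list of '(a, f(b, c), d)', the
    # split happens only at top-level commas, giving three arguments:
    # [a], [f ( b , c )], and [d], each with surrounding whitespace
    # stripped, plus the number of tokens consumed and start positions.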
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
i -= 1
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
del macro.value[i + 1]
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
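    # Behavior sketch (added): for '#define STR(x) #x' the '#'/parameter pair
    # is collapsed into a single string token and str_patch records its
    # (argument number, position) so macro_expand_args() can splice in the
    # quoted argument text later.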
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch. If the variadic macro argument
        # is empty, we get rid of the comma that precedes it in the expansion
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
else:
                            # This is not a macro invocation. It is just a
                            # word that happens to match the name of a macro,
                            # so move on to the next token.
i += 1
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
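    # Usage sketch (added; assumes a Preprocessor instance p constructed as
    # in the __main__ block at the bottom of this file):
    #
    #     p.define('SQUARE(x) ((x)*(x))')
    #     toks = p.expand_macros(p.tokenize('SQUARE(1+2)'))
    #     ''.join(str(t.value) for t in toks)   # -> '((1+2)*(1+2))'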
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
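    # Usage sketch (added; p is a Preprocessor instance as above):
    #
    #     p.define('FOO 1')
    #     p.evalexpr(p.tokenize('defined(FOO) && FOO + 1 > 1'))  # -> truthy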
# ----------------------------------------------------------------------
# parsegen()
#
    # Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
# insert necessary whitespace instead of eaten tokens
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
                        if args[0].value not in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
                        variadic = True
                        a[0].type = self.t_ID
                        a[0].value = '__VA_ARGS__'
                        del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
|
4d19e4660c79041078dd0b29bb6278815b69781badf31ad3119a0b6cd3746fa3 | # ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for performance.
#
# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
srclines = enumerate(lines)
start_tag = '#--! %s-start' % tag
end_tag = '#--! %s-end' % tag
for start_index, line in srclines:
if line.strip().startswith(start_tag):
break
for end_index, line in srclines:
if line.strip().endswith(end_tag):
break
return (start_index + 1, end_index)
def filter_section(lines, tag):
filtered_lines = []
include = True
tag_text = '#--! %s' % tag
for line in lines:
if line.strip().startswith(tag_text):
include = not include
elif include:
filtered_lines.append(line)
return filtered_lines
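# Behavior sketch (added): lines between paired '#--! TAG' markers are
# toggled out of the output:
#
#     filter_section(['a\n', '#--! DEBUG\n', 'b\n', '#--! DEBUG\n', 'c\n'],
#                    'DEBUG')
#     # -> ['a\n', 'c\n']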
def main():
dirname = os.path.dirname(__file__)
shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
lines = f.readlines()
parse_start, parse_end = get_source_range(lines, 'parsedebug')
parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
# Get the original source
orig_lines = lines[parse_start:parse_end]
# Filter the DEBUG sections out
parseopt_lines = filter_section(orig_lines, 'DEBUG')
# Filter the TRACKING sections out
parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
# Replace the parser source sections with updated versions
lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
lines[parseopt_start:parseopt_end] = parseopt_lines
lines = [line.rstrip()+'\n' for line in lines]
with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
f.writelines(lines)
print('Updated yacc.py')
if __name__ == '__main__':
main()
|
b31ea204817ffd6288794d22c36592a12a8185c947179121053734c038a04d5f | # PLY package
# Author: David Beazley ([email protected])
__version__ = '3.11'
__all__ = ['lex','yacc']
|
105d38deb207ad72581ba8dc6f5e5323648bc1d0a834e4195c237fd4cdfe2389 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might be
# considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import warnings
__version__ = '3.11'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
                               # 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
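# Usage sketch (added): a PlyLogger can wrap any writable stream, so
# diagnostics can be redirected by passing one as the errorlog or debuglog
# argument of yacc(), e.g. yacc(errorlog=PlyLogger(open('errors.log', 'w'))).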
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def set_lexpos(self, n, lexpos):
self.slice[n].lexpos = lexpos
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
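# Usage sketch (added; grammar names are illustrative): each grammar rule
# receives a YaccProduction as its argument, and indexing reads/writes the
# .value attribute of the underlying symbols:
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]
#         startline, endline = p.linespan(2)   # line range of the PLUS token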
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
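    # Dispatch sketch (added): parser.parse(text, debug=True) routes through
    # parsedebug() below (True is an int, so it is wrapped in a PlyLogger on
    # stderr); tracking=True selects parseopt(), which propagates line and
    # position information onto reduced symbols; otherwise the fastest
    # variant, parseopt_notrack(), is used.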
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
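# -----------------------------------------------------------------------------
# Example (illustrative sketch): none of the three parse methods above is
# normally called directly.  In stock PLY a parse() method on this class
# dispatches to one of them based on the debug/tracking flags, so typical
# client code looks like the following (the grammar module and input are
# hypothetical):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()          # build (or load) the LR tables
#     result = parser.parse('3 + 4 * 5', debug=False, tracking=False)
#
# debug selects parsedebug(), tracking selects parseopt(), and the default
# fast path is parseopt_notrack().
# -----------------------------------------------------------------------------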
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are also defined:
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = self.Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
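# -----------------------------------------------------------------------------
# Example (illustrative sketch): the grammar machinery below normally creates
# Production objects via Grammar.add_production(), but constructing one by
# hand shows what is stored:
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     str(p)    -> 'expr -> expr PLUS term'
#     len(p)    -> 3
#     p.usyms   -> ['expr', 'PLUS', 'term']    # duplicates removed
# -----------------------------------------------------------------------------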
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here are
# the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
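# -----------------------------------------------------------------------------
# Example (illustrative sketch): an LRItem is a production plus a dot marking
# how much of it has been parsed:
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     str(LRItem(p, 0))   -> 'expr -> . expr PLUS term'
#     str(LRItem(p, 1))   -> 'expr -> expr . PLUS term'
#     str(LRItem(p, 3))   -> 'expr -> expr PLUS term .'
# -----------------------------------------------------------------------------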
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
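# For example (illustrative sketch): with terminals PLUS and NUM,
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS': [], 'NUM': []})
#
# returns 'PLUS', since 'term' is a nonterminal.  This is what supplies a
# rule's default precedence in add_production() below.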
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
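# Example (illustrative sketch): precedence comes from the rightmost terminal
# unless %prec overrides it.  UMINUS here is a fictitious token that exists
# only to carry a precedence level:
#
#     g = Grammar(['PLUS', 'MINUS', 'NUM'])
#     g.set_precedence('PLUS', 'left', 1)
#     g.set_precedence('UMINUS', 'right', 2)
#     g.add_production('expr', ['expr', 'PLUS', 'expr'])              # ('left', 1)
#     g.add_production('expr', ['MINUS', 'expr', '%prec', 'UMINUS'])  # ('right', 2)
#     g.add_production('expr', ['NUM'])                               # ('right', 0)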
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
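# Example (illustrative sketch): a rule that can never bottom out in
# terminals is flagged as infinite:
#
#     g = Grammar(['NUM'])
#     g.add_production('wrap', ['wrap', 'NUM'])   # the only rule for 'wrap'
#     g.infinite_cycles()   -> ['wrap']
#
# Adding a terminating alternative, e.g. g.add_production('wrap', ['NUM']),
# makes the cycle disappear.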
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
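# Example (illustrative sketch): FIRST sets for a tiny grammar in which 'x'
# can derive the empty string:
#
#     g = Grammar(['A', 'B'])
#     g.add_production('s', ['x', 'B'])
#     g.add_production('x', ['A'])
#     g.add_production('x', [])       # x is nullable
#     g.compute_first()
#     g.First['x']   -> ['A', '<empty>']
#     g.First['s']   -> ['A', 'B']    # 'B' appears because x can vanish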
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
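# Example (illustrative sketch): continuing the grammar from the
# compute_first() example above:
#
#     g.set_start('s')
#     g.compute_follow()
#     g.Follow['s']   -> ['$end']
#     g.Follow['x']   -> ['B']    # 'B' can follow 'x' inside 's : x B'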
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, each production stores
# the list of its LR items in p.lr_items. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
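# Example (illustrative sketch): after build_lritems(), a production such as
# 'expr -> expr PLUS term' carries all of its dotted items:
#
#     [str(i) for i in p.lr_items]
#     -> ['expr -> . expr PLUS term',
#         'expr -> expr . PLUS term',
#         'expr -> expr PLUS . term',
#         'expr -> expr PLUS term .']
#
# p.lr_next (and each item's lr_next) walks this chain one item at a time.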
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
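# -----------------------------------------------------------------------------
# Example (illustrative sketch): computing F over a tiny relation.  With
# a R b, b R c, and base values F'(a)={1}, F'(b)={2}, F'(c)={3}:
#
#     X  = ['a', 'b', 'c']
#     R  = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]
#     FP = lambda x: {'a': [1], 'b': [2], 'c': [3]}[x]
#     digraph(X, R, FP)   -> {'a': [1, 2, 3], 'b': [2, 3], 'c': [3]}
#
# Each node ends up with its own base value plus everything reachable via R.
# -----------------------------------------------------------------------------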
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a set containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
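# Example (illustrative sketch): nullability propagates through chains of
# rules.  For a grammar containing
#
#     empty :               # p.len == 0, so 'empty' is nullable
#     opt   : empty         # every RHS symbol is nullable, so 'opt' is too
#
# this method returns {'empty', 'opt'}; the outer loop repeats until the
# set stops growing.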
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No, forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if (a, m) not in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
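# Illustrative sketch (not part of PLY's public API): how the globals/locals
# merge above behaves. 'local_rule' is a hypothetical local name.
def _demo_caller_dict():
    local_rule = 'expression : expression PLUS term'
    env = get_caller_module_dict(1)   # frame of _demo_caller_dict itself
    # env holds this module's globals overlaid with the function's locals,
    # which is how yacc() sees p_* definitions in its calling namespace.
    return env['local_rule']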
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
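# Illustrative sketch (hypothetical file/line values): what parse_grammar()
# produces for a two-line rule docstring, including a '|' continuation.
def _demo_parse_grammar():
    doc = ('expression : expression PLUS term\n'
           '           | term')
    return parse_grammar(doc, 'demo.py', 10)
    # -> [('demo.py', 11, 'expression', ['expression', 'PLUS', 'term']),
    #     ('demo.py', 12, 'expression', ['term'])]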
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
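    # Illustrative sketch of the cut-and-paste mistake this catches
    # (hypothetical module):
    #
    #     def p_statement(p):          # line 12
    #         'statement : expr'
    #
    #     def p_statement(p):          # line 40 -> warning, previously 12
    #         'statement : NAME EQUALS expr'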
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = sorted(tokens)
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
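    # Illustrative sketch (hypothetical grammar): the accepted precedence
    # format; later rows bind tighter, receiving a higher level number:
    #
    #     precedence = [('left', 'PLUS', 'MINUS'),
    #                   ('left', 'TIMES', 'DIVIDE'),
    #                   ('right', 'UMINUS')]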
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
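# Illustrative sketch (hypothetical grammar module): the shape of rule
# functions ParserReflect collects and validates. Each takes exactly one
# argument and carries its grammar in the docstring:
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]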
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ or __package__ attributes are available, try to obtain them
# from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
if '__package__' not in pdict and '__module__' in pdict:
if hasattr(sys.modules[pdict['__module__']], '__package__'):
pdict['__package__'] = sys.modules[pdict['__module__']].__package__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine if the module providing the grammar is part of a package.
    # If so, fix the tabmodule setting so that tables load correctly.
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
if tabmodule in sys.modules:
del sys.modules[tabmodule]
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
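# Illustrative usage sketch (hypothetical grammar module): a typical
# build-and-parse round trip with the entry point defined above, assuming
# a matching ply.lex tokenizer has already been built.
#
#     import ply.yacc as yacc
#     parser = yacc.yacc(method='LALR', debug=True, tabmodule='calc_tables')
#     result = parser.parse('3 + 4 * 5')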
|
1a6c9660363d9e5e85d56250f6b99c405821d459c00c5967a7f4c18d3704b2a5 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
    # Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))?|(\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
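# Illustrative usage sketch (not part of this file): pulling these
# specifications into a concrete tokenizer. The importing module still has
# to supply anything not defined here, such as whitespace handling.
#
#     import ply.lex as lex
#     from ctokens import *     # tokens list and t_* rules
#     t_ignore = ' \t'          # hypothetical: skip spaces and tabs
#     lexer = lex.lex()
#     lexer.input('x += 42; /* done */')
#     for tok in lexer:
#         print(tok.type, tok.value)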
|
4bbdbdd7076f82c3643fc46d5f659713949679b03f4333062e7a0947dac4a094 | # configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
from collections.abc import Mapping
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
    def build(self, o):
        # Dispatch on the AST node's class name, e.g. build_List, build_Const.
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
    # ast.literal_eval only accepts Python literals, so this is safe
    import ast
    return ast.literal_eval(s)
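# Illustrative sketch (not part of configobj's public API): unrepr()
# round-trips Python literals stored as strings; anything that is not a
# plain literal is rejected by ast.literal_eval().
def _demo_unrepr():
    value = unrepr("{'a': [1, 2.5, True]}")   # -> {'a': [1, 2.5, True]}
    # unrepr('os.system("boom")') would raise ValueError instead of
    # executing anything, which is the point of using literal_eval.
    return value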
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
    An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
        if self._cookie not in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
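# Illustrative sketch (hypothetical config lines): the two interpolation
# styles implemented by the engines above.
#
#     [DEFAULT]
#     home = /data
#     # configparser style:     path = %(home)s/logs
#     # string.Template style:  path = $home/logs  or  ${home}/logs
#     # ($$ in template style yields a literal '$')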
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, str):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, str):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, str):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
elif isinstance(value, Mapping) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, str):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, str):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
        dict.__delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], Mapping) and
isinstance(val, Mapping)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
        Return a dictionary of the return values.
        If the function raises an exception, raise the error
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognized keyword arguments you pass to walk will be passed on
        to the function you pass in.
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into its members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the keys of subsections as well as of ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, str):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognized option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, str):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, str) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('%s({%s})' % (self.__class__.__name__,
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, str):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, bytes) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, str):
return infile.splitlines(True)
elif isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
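    # Illustrative sketch (hypothetical bytes): with no encoding specified,
    # a UTF-8 BOM is stripped and recorded, e.g.
    #
    #     self._handle_bom([b'\xef\xbb\xbfkey = value\n'])
    #
    # returns ['key = value\n'] with self.BOM set to True (self.encoding
    # stays None and the lines are decoded as UTF-8).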
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, bytes) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
        Decode infile to unicode, using the specified encoding.
        If infile is a string, it also needs converting to a list.
"""
if isinstance(infile, str):
return infile.splitlines(True)
if isinstance(infile, bytes):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, bytes):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, bytes) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, str):
            # intentionally 'str' because it's just whatever the "normal"
            # string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
                    # the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
                                if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
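    # Illustrative config fragment (not original code) showing the kind of
    # input ``_parse`` consumes - comments, nested sections, list values
    # and multiline values:
    #
    #     # initial comment
    #     key = value                # inline comment
    #     names = one, two, three    # a list value
    #     [section]
    #     [[subsection]]
    #     text = """first line
    #     second line"""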
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
        The error will have occurred at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, str):
if self.stringify:
                # intentionally 'str' because it's just whatever the "normal"
                # string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
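    # Illustrative sketch (not original code) of ``_quote`` with default
    # settings (``list_values=True``):
    #
    #     self._quote('plain')       # -> 'plain'   (no quoting needed)
    #     self._quote('a, b')        # -> '"a, b"'  (comma forces quoting)
    #     self._quote(['a', 'b'])    # -> 'a, b'    (lists are comma joined)
    #     self._quote([])            # -> ','       (empty list syntax)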
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
        # NOTE: there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
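    # Illustrative sketch (not original code) of what ``_handle_value``
    # returns with default settings; the exact comment capture depends on
    # the ``_valueexp`` regex:
    #
    #     self._handle_value('3')          # -> ('3', '')
    #     self._handle_value('1, 2, 3')    # -> (['1', '2', '3'], '')
    #     self._handle_value(',')          # -> ([], '')  (empty list)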
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
        # NOTE: the calls to self._quote here handle non-string values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
1
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, bytes):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
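    # Illustrative usage sketch (not original code): with no filename and
    # no outfile, ``write`` returns the rendered lines instead of writing:
    #
    #     cfg = ConfigObj()
    #     cfg['name'] = 'value'
    #     cfg.write()                  # -> ['name = value']
    #     # cfg.write(open('out.ini', 'wb')) would write bytes instead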
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from .validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
            if (entry not in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
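    # Sketch (not original code) of the three possible return shapes
    # from ``validate``:
    #
    #     result = config.validate(validator)
    #     if result is True:        # every check passed
    #         ...
    #     elif result is False:     # every check failed
    #         ...
    #     else:                     # dict of per-key / per-section results
    #         ...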
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, str):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
    To use it, provide a configspec with all your members in it (the value
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
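# Illustrative usage sketch for SimpleVal (file names are placeholders):
#
#     config = ConfigObj('config.ini', configspec='config.spec')
#     result = config.validate(SimpleVal())
#     # True if every spec entry is present, else a dict of True/False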
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), Mapping):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
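# Illustrative usage sketch for flatten_errors (file names are
# placeholders; the Validator import path may vary by installation):
#
#     from configobj.validate import Validator
#     config = ConfigObj('config.ini', configspec='config.spec')
#     result = config.validate(Validator(), preserve_errors=True)
#     for sections, key, error in flatten_errors(config, result):
#         path = ' -> '.join(sections + ([key] if key is not None else []))
#         print('%s: %s' % (path, error if error else 'missing'))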
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
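# Illustrative usage sketch, run after validation:
#
#     for section_path, name in get_extra_values(config):
#         print('extra entry %r in section %r' % (name, section_path))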
"""*A programming language is a medium of expression.* - Paul Graham"""
# validate.py
# A Validator object
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Mark Andrews: mark AT la-la DOT com
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
"""
The Validator object is used to check that supplied values
conform to a specification.
The value can be supplied as a string - e.g. from a config file.
In this case the check will also *convert* the value to
the required type. This allows you to add validation
as a transparent layer to access data stored as strings.
The validation checks that the data is correct *and*
converts it to the expected type.
Some standard checks are provided for basic data types.
Additional checks are easy to write. They can be
provided when the ``Validator`` is instantiated or
added afterwards.
The standard functions work with the following basic data types :
* integers
* floats
* booleans
* strings
* ip_addr
plus lists of these datatypes
Adding additional checks is done through coding simple functions.
The full set of standard checks are :
* 'integer': matches integer values (including negative)
Takes optional 'min' and 'max' arguments : ::
integer()
integer(3, 9) # any value from 3 to 9
integer(min=0) # any positive value
integer(max=9)
* 'float': matches float values
Has the same parameters as the integer check.
* 'boolean': matches boolean values - ``True`` or ``False``
Acceptable string values for True are :
true, on, yes, 1
Acceptable string values for False are :
false, off, no, 0
Any other value raises an error.
* 'ip_addr': matches an Internet Protocol address, v.4, represented
by a dotted-quad string, i.e. '1.2.3.4'.
* 'string': matches any string.
Takes optional keyword args 'min' and 'max'
to specify min and max lengths of the string.
* 'list': matches any list.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the list. (Always returns a list.)
* 'tuple': matches any tuple.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the tuple. (Always returns a tuple.)
* 'int_list': Matches a list of integers.
Takes the same arguments as list.
* 'float_list': Matches a list of floats.
Takes the same arguments as list.
* 'bool_list': Matches a list of boolean values.
Takes the same arguments as list.
* 'ip_addr_list': Matches a list of IP addresses.
Takes the same arguments as list.
* 'string_list': Matches a list of strings.
Takes the same arguments as list.
* 'mixed_list': Matches a list with different types in
specific positions. List size must match
the number of arguments.
Each position can be one of :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So to specify a list with two strings followed
by two integers, you write the check as : ::
mixed_list('string', 'string', 'integer', 'integer')
* 'pass': This check matches everything ! It never fails
and the value is unchanged.
It is also the default if no check is specified.
* 'option': This check matches any from a list of options.
You specify this check with : ::
option('option 1', 'option 2', 'option 3')
You can supply a default value (returned if no value is supplied)
using the default keyword argument.
You specify a list argument for default using a list constructor syntax in
the check : ::
checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
A badly formatted set of arguments will raise a ``VdtParamError``.
"""
__version__ = '1.0.1'
__all__ = (
'__version__',
'dottedQuadToNum',
'numToDottedQuad',
'ValidateError',
'VdtUnknownCheckError',
'VdtParamError',
'VdtTypeError',
'VdtValueError',
'VdtValueTooSmallError',
'VdtValueTooBigError',
'VdtValueTooShortError',
'VdtValueTooLongError',
'VdtMissingValue',
'Validator',
'is_integer',
'is_float',
'is_boolean',
'is_list',
'is_tuple',
'is_ip_addr',
'is_string',
'is_int_list',
'is_bool_list',
'is_float_list',
'is_string_list',
'is_ip_addr_list',
'is_mixed_list',
'is_option',
'__docformat__',
)
import re
import sys
from pprint import pprint
#TODO - #21 - six is part of the repo now, but we didn't switch over to it here
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
if sys.version_info < (3,):
string_type = basestring
else:
string_type = str
# so tests that care about unicode on 2.x can specify unicode, and the same
# tests when run on 3.x won't complain about an undefined name "unicode"
# since all strings are unicode on 3.x we just want to pass it through
# unchanged
unicode = lambda x: x
# in python 3, all ints are equivalent to python 2 longs, and they'll
# never show "L" in the repr
long = int
_list_arg = re.compile(r'''
(?:
([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
)
\)
)
''', re.VERBOSE | re.DOTALL) # two groups
_list_members = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
''', re.VERBOSE | re.DOTALL) # one group
_paramstring = r'''
(?:
(
(?:
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
\)
)|
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?)| # unquoted
(?: # keyword argument
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
)
)
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
)
'''
_matchstring = '^%s*' % _paramstring
# Python pre 2.2.1 doesn't have bool
try:
bool
except NameError:
def bool(val):
"""Simple boolean equivalent function. """
if val:
return 1
else:
return 0
def dottedQuadToNum(ip):
"""
Convert decimal dotted quad string to long integer
>>> int(dottedQuadToNum('1 '))
1
>>> int(dottedQuadToNum(' 1.2'))
16777218
>>> int(dottedQuadToNum(' 1.2.3 '))
16908291
>>> int(dottedQuadToNum('1.2.3.4'))
16909060
>>> dottedQuadToNum('255.255.255.255')
4294967295
>>> dottedQuadToNum('255.255.255.256')
Traceback (most recent call last):
ValueError: Not a good dotted-quad IP: 255.255.255.256
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
try:
return struct.unpack('!L',
socket.inet_aton(ip.strip()))[0]
except socket.error:
raise ValueError('Not a good dotted-quad IP: %s' % ip)
def numToDottedQuad(num):
"""
Convert int or long int to dotted quad string
>>> numToDottedQuad(long(-1))
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(long(1))
'0.0.0.1'
>>> numToDottedQuad(long(16777218))
'1.0.0.2'
>>> numToDottedQuad(long(16908291))
'1.2.0.3'
>>> numToDottedQuad(long(16909060))
'1.2.3.4'
>>> numToDottedQuad(long(4294967295))
'255.255.255.255'
>>> numToDottedQuad(long(4294967296))
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
>>> numToDottedQuad(-1)
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(1)
'0.0.0.1'
>>> numToDottedQuad(16777218)
'1.0.0.2'
>>> numToDottedQuad(16908291)
'1.2.0.3'
>>> numToDottedQuad(16909060)
'1.2.3.4'
>>> numToDottedQuad(4294967295)
'255.255.255.255'
>>> numToDottedQuad(4294967296)
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
# no need to intercept here, 4294967295L is fine
if num > long(4294967295) or num < 0:
raise ValueError('Not a good numeric IP: %s' % num)
try:
return socket.inet_ntoa(
struct.pack('!L', long(num)))
except (socket.error, struct.error, OverflowError):
raise ValueError('Not a good numeric IP: %s' % num)
class ValidateError(Exception):
"""
This error indicates that the check failed.
It can be the base class for more specific errors.
Any check function that fails ought to raise this error.
(or a subclass)
>>> raise ValidateError
Traceback (most recent call last):
ValidateError
"""
class VdtMissingValue(ValidateError):
"""No value was supplied to a check that needed one."""
class VdtUnknownCheckError(ValidateError):
"""An unknown check function was requested"""
def __init__(self, value):
"""
>>> raise VdtUnknownCheckError('yoda')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
"""
ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
class VdtParamError(SyntaxError):
"""An incorrect parameter was passed"""
def __init__(self, name, value):
"""
>>> raise VdtParamError('yoda', 'jedi')
Traceback (most recent call last):
VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
"""
SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
class VdtTypeError(ValidateError):
"""The value supplied was of the wrong type"""
def __init__(self, value):
"""
>>> raise VdtTypeError('jedi')
Traceback (most recent call last):
VdtTypeError: the value "jedi" is of the wrong type.
"""
ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
class VdtValueError(ValidateError):
"""The value supplied was of the correct type, but was not an allowed value."""
def __init__(self, value):
"""
>>> raise VdtValueError('jedi')
Traceback (most recent call last):
VdtValueError: the value "jedi" is unacceptable.
"""
ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
class VdtValueTooSmallError(VdtValueError):
"""The value supplied was of the correct type, but was too small."""
def __init__(self, value):
"""
>>> raise VdtValueTooSmallError('0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "0" is too small.
"""
ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
class VdtValueTooBigError(VdtValueError):
"""The value supplied was of the correct type, but was too big."""
def __init__(self, value):
"""
>>> raise VdtValueTooBigError('1')
Traceback (most recent call last):
VdtValueTooBigError: the value "1" is too big.
"""
ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
class VdtValueTooShortError(VdtValueError):
"""The value supplied was of the correct type, but was too short."""
def __init__(self, value):
"""
>>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short.
"""
ValidateError.__init__(
self,
'the value "%s" is too short.' % (value,))
class VdtValueTooLongError(VdtValueError):
"""The value supplied was of the correct type, but was too long."""
def __init__(self, value):
"""
>>> raise VdtValueTooLongError('jedie')
Traceback (most recent call last):
VdtValueTooLongError: the value "jedie" is too long.
"""
ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
class Validator(object):
"""
Validator is an object that allows you to register a set of 'checks'.
These checks take input and test that it conforms to the check.
This can also involve converting the value from a string into
the correct datatype.
The ``check`` method takes an input string which configures which
check is to be used and applies that check to a supplied value.
An example input string would be:
'int_range(param1, param2)'
You would then provide something like:
>>> def int_range_check(value, min, max):
... # turn min and max from strings to integers
... min = int(min)
... max = int(max)
... # check that value is of the correct type.
... # possible valid inputs are integers or strings
... # that represent integers
... if not isinstance(value, (int, long, string_type)):
... raise VdtTypeError(value)
... elif isinstance(value, string_type):
... # if we are given a string
... # attempt to convert to an integer
... try:
... value = int(value)
... except ValueError:
... raise VdtValueError(value)
... # check the value is between our constraints
... if not min <= value:
... raise VdtValueTooSmallError(value)
... if not value <= max:
... raise VdtValueTooBigError(value)
... return value
>>> fdict = {'int_range': int_range_check}
>>> vtr1 = Validator(fdict)
>>> vtr1.check('int_range(20, 40)', '30')
30
>>> vtr1.check('int_range(20, 40)', '60')
Traceback (most recent call last):
VdtValueTooBigError: the value "60" is too big.
New functions can be added with : ::
>>> vtr2 = Validator()
>>> vtr2.functions['int_range'] = int_range_check
Or by passing in a dictionary of functions when Validator
is instantiated.
Your functions *can* use keyword arguments,
but the first argument should always be 'value'.
If the function doesn't take additional arguments,
the parentheses are optional in the check.
It can be written with either of : ::
keyword = function_name
keyword = function_name()
The first program to utilise Validator() was Michael Foord's
ConfigObj, an alternative to ConfigParser which supports lists and
can validate a config file using a config schema.
For more details on using Validator with ConfigObj see:
https://configobj.readthedocs.org/en/latest/configobj.html
"""
# this regex does the initial parsing of the checks
_func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL)
# this regex takes apart keyword arguments
_key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL)
# this regex finds keyword=list(....) type values
_list_arg = _list_arg
# this regex takes individual values out of lists - in one pass
_list_members = _list_members
# These regexes check a set of arguments for validity
# and then pull the members out
_paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
_matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
def __init__(self, functions=None):
"""
>>> vtri = Validator()
"""
self.functions = {
'': self._pass,
'integer': is_integer,
'float': is_float,
'boolean': is_boolean,
'ip_addr': is_ip_addr,
'string': is_string,
'list': is_list,
'tuple': is_tuple,
'int_list': is_int_list,
'float_list': is_float_list,
'bool_list': is_bool_list,
'ip_addr_list': is_ip_addr_list,
'string_list': is_string_list,
'mixed_list': is_mixed_list,
'pass': self._pass,
'option': is_option,
'force_list': force_list,
}
if functions is not None:
self.functions.update(functions)
# tekNico: for use by ConfigObj
self.baseErrorClass = ValidateError
self._cache = {}
def check(self, check, value, missing=False):
"""
Usage: check(check, value)
Arguments:
check: string representing check to apply (including arguments)
value: object to be checked
Returns value, converted to correct type if necessary
If the check fails, raises a ``ValidateError`` subclass.
>>> vtor.check('yoda', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('yoda()', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('string(default="")', '', missing=True)
''
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if missing:
if default is None:
# no information needed here - to be handled by caller
raise VdtMissingValue()
value = self._handle_none(default)
if value is None:
return None
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _handle_none(self, value):
if value == 'None':
return None
elif value in ("'None'", '"None"'):
# Special case a quoted None
value = self._unquote(value)
return value
def _parse_with_caching(self, check):
if check in self._cache:
fun_name, fun_args, fun_kwargs, default = self._cache[check]
# We call list and dict below to work with *copies* of the data
# rather than the original (which are mutable of course)
fun_args = list(fun_args)
fun_kwargs = dict(fun_kwargs)
else:
fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
fun_kwargs = dict([(str(key), value) for (key, value) in list(fun_kwargs.items())])
self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
return fun_name, fun_args, fun_kwargs, default
def _check_value(self, value, fun_name, fun_args, fun_kwargs):
try:
fun = self.functions[fun_name]
except KeyError:
raise VdtUnknownCheckError(fun_name)
else:
return fun(value, *fun_args, **fun_kwargs)
def _parse_check(self, check):
fun_match = self._func_re.match(check)
if fun_match:
fun_name = fun_match.group(1)
arg_string = fun_match.group(2)
arg_match = self._matchfinder.match(arg_string)
if arg_match is None:
# Bad syntax
                # VdtParamError requires (name, value); pass the check
                # string as the value so this raise doesn't itself fail
                raise VdtParamError('check', check)
fun_args = []
fun_kwargs = {}
# pull out args of group 2
for arg in self._paramfinder.findall(arg_string):
# args may need whitespace removing (before removing quotes)
arg = arg.strip()
listmatch = self._list_arg.match(arg)
if listmatch:
key, val = self._list_handle(listmatch)
fun_kwargs[key] = val
continue
keymatch = self._key_arg.match(arg)
if keymatch:
val = keymatch.group(2)
                    if val not in ("'None'", '"None"'):
# Special case a quoted None
val = self._unquote(val)
fun_kwargs[keymatch.group(1)] = val
continue
fun_args.append(self._unquote(arg))
else:
# allows for function names without (args)
return check, (), {}, None
# Default must be deleted if the value is specified too,
# otherwise the check function will get a spurious "default" keyword arg
default = fun_kwargs.pop('default', None)
return fun_name, fun_args, fun_kwargs, default
def _unquote(self, val):
"""Unquote a value if necessary."""
if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
val = val[1:-1]
return val
def _list_handle(self, listmatch):
"""Take apart a ``keyword=list('val, 'val')`` type string."""
out = []
name = listmatch.group(1)
args = listmatch.group(2)
for arg in self._list_members.findall(args):
out.append(self._unquote(arg))
return name, out
def _pass(self, value):
"""
Dummy check that always passes
>>> vtor.check('', 0)
0
>>> vtor.check('', '0')
'0'
"""
return value
def get_default_value(self, check):
"""
Given a check, return the default value for the check
(converted to the right type).
If the check doesn't specify a default value then a
``KeyError`` will be raised.
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if default is None:
raise KeyError('Check "%s" has no default value.' % check)
value = self._handle_none(default)
if value is None:
return value
return self._check_value(value, fun_name, fun_args, fun_kwargs)
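# Sketch (not original code) for ``get_default_value``, using the ``vtor``
# instance assumed by the doctests in this module:
#
#     vtor.get_default_value('integer(default=42)')   # -> 42
#     vtor.get_default_value('string')                # raises KeyError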
def _is_num_param(names, values, to_float=False):
"""
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
"""
    fun = float if to_float else int
out_params = []
for (name, val) in zip(names, values):
if val is None:
out_params.append(val)
elif isinstance(val, (int, long, float, string_type)):
try:
out_params.append(fun(val))
except ValueError as e:
raise VdtParamError(name, val)
else:
raise VdtParamError(name, val)
return out_params
# built in checks
# you can override these by setting the appropriate name
# in Validator.functions
# note: if the params are specified wrongly in your input string,
# errors will also be raised.
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2')
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9)
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35')
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35)
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, (int, long, string_type)):
raise VdtTypeError(value)
if isinstance(value, string_type):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
def is_float(value, min=None, max=None):
"""
A check that tests that a given value is a float
(an integer will be accepted), and optionally - that it is between bounds.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
This can accept negative values.
>>> vtor.check('float', '2')
2.0
From now on we multiply the value to avoid comparing decimals
>>> vtor.check('float', '-6.8') * 10
-68.0
>>> vtor.check('float', '12.2') * 10
122.0
>>> vtor.check('float', 8.4) * 10
84.0
>>> vtor.check('float', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('float(10.1)', '10.2') * 10
102.0
>>> vtor.check('float(max=20.2)', '15.1') * 10
151.0
>>> vtor.check('float(10.0)', '9.0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9.0" is too small.
>>> vtor.check('float(max=20.0)', '35.0')
Traceback (most recent call last):
VdtValueTooBigError: the value "35.0" is too big.
"""
(min_val, max_val) = _is_num_param(
('min', 'max'), (min, max), to_float=True)
if not isinstance(value, (int, long, float, string_type)):
raise VdtTypeError(value)
if not isinstance(value, float):
# if it's a string - does it represent a float ?
try:
value = float(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
bool_dict = {
True: True, 'on': True, '1': True, 'true': True, 'yes': True,
False: False, 'off': False, '0': False, 'false': False, 'no': False,
}
def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '')
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up')
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, string_type):
try:
return bool_dict[value.lower()]
except KeyError:
raise VdtTypeError(value)
# we do an equality test rather than an identity test
    # this ensures Python 2.2 compatibility
# and allows 0 and 1 to represent True and False
if value == False:
return False
elif value == True:
return True
else:
raise VdtTypeError(value)
def is_ip_addr(value):
"""
Check that the supplied value is an Internet Protocol address, v.4,
represented by a dotted-quad string, i.e. '1.2.3.4'.
>>> vtor.check('ip_addr', '1 ')
'1'
>>> vtor.check('ip_addr', ' 1.2')
'1.2'
>>> vtor.check('ip_addr', ' 1.2.3 ')
'1.2.3'
>>> vtor.check('ip_addr', '1.2.3.4')
'1.2.3.4'
>>> vtor.check('ip_addr', '0.0.0.0')
'0.0.0.0'
>>> vtor.check('ip_addr', '255.255.255.255')
'255.255.255.255'
>>> vtor.check('ip_addr', '255.255.255.256')
Traceback (most recent call last):
VdtValueError: the value "255.255.255.256" is unacceptable.
>>> vtor.check('ip_addr', '1.2.3.4.5')
Traceback (most recent call last):
VdtValueError: the value "1.2.3.4.5" is unacceptable.
>>> vtor.check('ip_addr', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
value = value.strip()
try:
dottedQuadToNum(value)
except ValueError:
raise VdtValueError(value)
return value
def is_list(value, min=None, max=None):
"""
Check that the value is a list of values.
You can optionally specify the minimum and maximum number of members.
It does no check on list members.
>>> vtor.check('list', ())
[]
>>> vtor.check('list', [])
[]
>>> vtor.check('list', (1, 2))
[1, 2]
>>> vtor.check('list', [1, 2])
[1, 2]
>>> vtor.check('list(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
[1, 2, 3, 4]
>>> vtor.check('list', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('list', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
if isinstance(value, string_type):
raise VdtTypeError(value)
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return list(value)
def is_tuple(value, min=None, max=None):
"""
Check that the value is a tuple of values.
You can optionally specify the minimum and maximum number of members.
It does no check on members.
>>> vtor.check('tuple', ())
()
>>> vtor.check('tuple', [])
()
>>> vtor.check('tuple', (1, 2))
(1, 2)
>>> vtor.check('tuple', [1, 2])
(1, 2)
>>> vtor.check('tuple(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
(1, 2, 3, 4)
>>> vtor.check('tuple', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('tuple', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
return tuple(is_list(value, min, max))
def is_string(value, min=None, max=None):
"""
Check that the supplied value is a string.
You can optionally specify the minimum and maximum number of members.
>>> vtor.check('string', '0')
'0'
>>> vtor.check('string', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('string(2)', '12')
'12'
>>> vtor.check('string(2)', '1')
Traceback (most recent call last):
VdtValueTooShortError: the value "1" is too short.
>>> vtor.check('string(min=2, max=3)', '123')
'123'
>>> vtor.check('string(min=2, max=3)', '1234')
Traceback (most recent call last):
VdtValueTooLongError: the value "1234" is too long.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return value
def is_int_list(value, min=None, max=None):
"""
Check that the value is a list of integers.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an integer.
>>> vtor.check('int_list', ())
[]
>>> vtor.check('int_list', [])
[]
>>> vtor.check('int_list', (1, 2))
[1, 2]
>>> vtor.check('int_list', [1, 2])
[1, 2]
>>> vtor.check('int_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_integer(mem) for mem in is_list(value, min, max)]
def is_bool_list(value, min=None, max=None):
"""
Check that the value is a list of booleans.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a boolean.
>>> vtor.check('bool_list', ())
[]
>>> vtor.check('bool_list', [])
[]
>>> check_res = vtor.check('bool_list', (True, False))
>>> check_res == [True, False]
1
>>> check_res = vtor.check('bool_list', [True, False])
>>> check_res == [True, False]
1
>>> vtor.check('bool_list', [True, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_boolean(mem) for mem in is_list(value, min, max)]
def is_float_list(value, min=None, max=None):
"""
Check that the value is a list of floats.
You can optionally specify the minimum and maximum number of members.
    Each list member is checked to ensure it is a float.
>>> vtor.check('float_list', ())
[]
>>> vtor.check('float_list', [])
[]
>>> vtor.check('float_list', (1, 2.0))
[1.0, 2.0]
>>> vtor.check('float_list', [1, 2.0])
[1.0, 2.0]
>>> vtor.check('float_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_float(mem) for mem in is_list(value, min, max)]
def is_string_list(value, min=None, max=None):
"""
Check that the value is a list of strings.
You can optionally specify the minimum and maximum number of members.
    Each list member is checked to ensure it is a string.
>>> vtor.check('string_list', ())
[]
>>> vtor.check('string_list', [])
[]
>>> vtor.check('string_list', ('a', 'b'))
['a', 'b']
>>> vtor.check('string_list', ['a', 1])
Traceback (most recent call last):
VdtTypeError: the value "1" is of the wrong type.
>>> vtor.check('string_list', 'hello')
Traceback (most recent call last):
VdtTypeError: the value "hello" is of the wrong type.
"""
if isinstance(value, string_type):
raise VdtTypeError(value)
return [is_string(mem) for mem in is_list(value, min, max)]
def is_ip_addr_list(value, min=None, max=None):
"""
Check that the value is a list of IP addresses.
You can optionally specify the minimum and maximum number of members.
    Each list member is checked to ensure it is an IP address.
>>> vtor.check('ip_addr_list', ())
[]
>>> vtor.check('ip_addr_list', [])
[]
>>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
['1.2.3.4', '5.6.7.8']
>>> vtor.check('ip_addr_list', ['a'])
Traceback (most recent call last):
VdtValueError: the value "a" is unacceptable.
"""
return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
    A minimum greater than one will fail if the user only supplies a
string.
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max)
fun_dict = {
'integer': is_integer,
'float': is_float,
'ip_addr': is_ip_addr,
'string': is_string,
'boolean': is_boolean,
}
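# A dispatch sketch using the mapping above: fun_dict['integer']('3')
# returns 3 and fun_dict['boolean']('True') returns True, which is how
# is_mixed_list below validates each member against its declared type name.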
def is_mixed_list(value, *args):
"""
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
    You specify the type of each member as a positional argument.
    Each type should be one of the following strings:
    'integer', 'float', 'ip_addr', 'string', 'boolean'
    So you can specify a list of two strings, followed by
    two integers as:
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    True
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    True
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('mixed_list("yoda")', ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
"""
try:
length = len(value)
except TypeError:
raise VdtTypeError(value)
if length < len(args):
raise VdtValueTooShortError(value)
elif length > len(args):
raise VdtValueTooLongError(value)
try:
return [fun_dict[arg](val) for arg, val in zip(args, value)]
except KeyError as e:
raise VdtParamError('mixed_list', e)
def is_option(value, *options):
"""
This check matches the value to any of a set of options.
>>> vtor.check('option("yoda", "jedi")', 'yoda')
'yoda'
>>> vtor.check('option("yoda", "jedi")', 'jed')
Traceback (most recent call last):
VdtValueError: the value "jed" is unacceptable.
>>> vtor.check('option("yoda", "jedi")', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, string_type):
raise VdtTypeError(value)
    if value not in options:
raise VdtValueError(value)
return value
def _test(value, *args, **keywargs):
"""
A function that exists for test purposes.
>>> checks = [
... '3, 6, min=1, max=3, test=list(a, b, c)',
... '3',
... '3, 6',
... '3,',
... 'min=1, test="a b c"',
... 'min=5, test="a, b, c"',
... 'min=1, max=3, test="a, b, c"',
... 'min=-100, test=-99',
... 'min=1, max=3',
... '3, 6, test="36"',
... '3, 6, test="a, b, c"',
... '3, max=3, test=list("a", "b", "c")',
... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
... "test='x=fish(3)'",
... ]
>>> v = Validator({'test': _test})
>>> for entry in checks:
... pprint(v.check(('test(%s)' % entry), 3))
(3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']})
(3, ('3',), {})
(3, ('3', '6'), {})
(3, ('3',), {})
(3, (), {'min': '1', 'test': 'a b c'})
(3, (), {'min': '5', 'test': 'a, b, c'})
(3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'})
(3, (), {'min': '-100', 'test': '-99'})
(3, (), {'max': '3', 'min': '1'})
(3, ('3', '6'), {'test': '36'})
(3, ('3', '6'), {'test': 'a, b, c'})
(3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']})
(3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']})
(3, (), {'test': 'x=fish(3)'})
>>> v = Validator()
>>> v.check('integer(default=6)', '3')
3
>>> v.check('integer(default=6)', None, True)
6
>>> v.get_default_value('integer(default=6)')
6
>>> v.get_default_value('float(default=6)')
6.0
>>> v.get_default_value('pass(default=None)')
>>> v.get_default_value("string(default='None')")
'None'
>>> v.get_default_value('pass')
Traceback (most recent call last):
KeyError: 'Check "pass" has no default value.'
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
['1', '2', '3', '4']
>>> v = Validator()
>>> v.check("pass(default=None)", None, True)
>>> v.check("pass(default='None')", None, True)
'None'
>>> v.check('pass(default="None")', None, True)
'None'
>>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
['1', '2', '3', '4']
    Bug test for unicode arguments (under Python 3 every ``str`` is
    unicode, so plain string literals exercise the same path)
    >>> v = Validator()
    >>> v.check('string(min=4)', 'test') == 'test'
    True
    >>> v = Validator()
    >>> v.get_default_value('string(min=4, default="1234")') == '1234'
    True
    >>> v.check('string(min=4, default="1234")', 'test') == 'test'
    True
>>> v = Validator()
>>> default = v.get_default_value('string(default=None)')
>>> default == None
    True
"""
return (value, args, keywargs)
def _test2():
"""
>>>
>>> v = Validator()
>>> v.get_default_value('string(default="#ff00dd")')
'#ff00dd'
>>> v.get_default_value('integer(default=3) # comment')
3
"""
def _test3():
r"""
>>> vtor.check('string(default="")', '', missing=True)
''
>>> vtor.check('string(default="\n")', '', missing=True)
'\n'
>>> print(vtor.check('string(default="\n")', '', missing=True))
<BLANKLINE>
<BLANKLINE>
>>> vtor.check('string()', '\n')
'\n'
>>> vtor.check('string(default="\n\n\n")', '', missing=True)
'\n\n\n'
>>> vtor.check('string()', 'random \n text goes here\n\n')
'random \n text goes here\n\n'
>>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
... '', missing=True)
' \nrandom text\ngoes \n here\n\n '
>>> vtor.check("string(default='\n\n\n')", '', missing=True)
'\n\n\n'
>>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
'\n'
>>> vtor.check("string_list()", ['foo', '\n', 'bar'])
['foo', '\n', 'bar']
>>> vtor.check("string_list(default=list('\n'))", '', missing=True)
['\n']
"""
if __name__ == '__main__':
# run the code tests in doctest format
import sys
import doctest
m = sys.modules.get('__main__')
globs = m.__dict__.copy()
globs.update({
'vtor': Validator(),
})
failures, tests = doctest.testmod(
m, globs=globs,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
assert not failures, '{} failures out of {} tests'.format(failures, tests)
|
ca4a951e07b61262034cc39df7a8760f218768cfe0a69ff02b3dcad4c9596627 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
|
1c13c50b6d0ba3a8d52d914cde165f0ef8d0478fab99f9457b6a09836461869e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
min_percent=None, max_percent=None, percent=None,
cmap='Greys_r'):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
    ext : int or str
        FITS extension name or number of the image to convert. The
        default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the
        image. The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching
        the image. The default is the image maximum. ``max_cut``
        overrides ``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
        the minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
        the maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
"""
import matplotlib
import matplotlib.cm as cm
import matplotlib.image as mimg
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
    if image.ndim != 2:
        log.critical(f'data in FITS extension {ext} is not a 2D array')
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith('.fits'):
out_fn = os.path.splitext(out_fn)[0]
out_fn += '.png'
# explicitly define the output format
out_format = os.path.splitext(out_fn)[1][1:]
try:
cm.get_cmap(cmap)
except ValueError:
log.critical(f'{cmap} is not a valid matplotlib colormap name.')
return 1
norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
min_cut=min_cut, max_cut=max_cut,
min_percent=min_percent, max_percent=max_percent,
percent=percent)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
format=out_format)
log.info(f'Saved file to {out_fn}.')
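# A minimal programmatic sketch (assuming a FITS file named 'example.fits'
# exists on disk; the name is hypothetical):
#
#     fits2bitmap('example.fits', stretch='sqrt', percent=99.0)
#
# This would write 'example.png', stretched by a square root between the
# 0.5 and 99.5 percentile cut levels.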
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description='Create a bitmap file from a FITS image.')
parser.add_argument('-e', '--ext', metavar='hdu', default=0,
help='Specify the HDU extension number or name '
'(Default is 0).')
parser.add_argument('-o', metavar='filename', type=str, default=None,
help='Filename for the output image (Default is a '
'PNG file with the same name as the FITS file).')
parser.add_argument('--stretch', type=str, default='linear',
help='Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").')
parser.add_argument('--power', type=float, default=1.0,
help='Power index for "power" stretching (Default is '
'1.0).')
parser.add_argument('--asinh_a', type=float, default=0.1,
help='The value in normalized image where the asinh '
'curve transitions from linear to logarithmic '
'behavior (used only for "asinh" stretch) '
'(Default is 0.1).')
parser.add_argument('--min_cut', type=float, default=None,
help='The pixel value of the minimum cut level '
'(Default is the image minimum).')
parser.add_argument('--max_cut', type=float, default=None,
help='The pixel value of the maximum cut level '
'(Default is the image maximum).')
parser.add_argument('--min_percent', type=float, default=None,
help='The percentile value used to determine the '
'minimum cut level (Default is 0).')
parser.add_argument('--max_percent', type=float, default=None,
help='The percentile value used to determine the '
'maximum cut level (Default is 100).')
parser.add_argument('--percent', type=float, default=None,
help='The percentage of the image values used to '
'determine the pixel values of the minimum and '
'maximum cut levels (Default is 100).')
parser.add_argument('--cmap', metavar='colormap_name', type=str,
default='Greys_r', help='matplotlib color map name '
'(Default is "Greys_r").')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files to convert')
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(filename, ext=args.ext, out_fn=args.o,
stretch=args.stretch, min_cut=args.min_cut,
max_cut=args.max_cut, min_percent=args.min_percent,
max_percent=args.max_percent, percent=args.percent,
power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
|
dfa2d97a2d8636ab4f647c32ae76c4298f27527e918902bd4c62e18e6aaaa600 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.coordinates import (SkyCoord, frame_transform_graph,
UnitSphericalRepresentation,
BaseCoordinateFrame)
__all__ = ['CurvedTransform', 'CoordinateTransform',
'World2PixelTransform', 'Pixel2WorldTransform']
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._input_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._input_system_name} not found")
else:
self.input_system = frame_cls()
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._output_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._output_system_name} not found")
else:
self.output_system = frame_cls()
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords*u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
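# An illustrative sketch (assuming the built-in 'fk5' and 'galactic' frames,
# both registered in astropy's frame transform graph):
#
#     tr = CoordinateTransform('fk5', 'galactic')
#     coords = np.array([[10.68, 41.27]])    # (lon, lat) pairs in degrees
#     gal = tr.transform(coords)             # Nx2 array in Galactic degrees
#     fk5 = tr.inverted().transform(gal)     # round-trips back to FK5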
class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions
"""
@abc.abstractmethod
def transform(self, world):
"""
        Transform world to pixel coordinates. You should pass in an NxM
        array where N is the number of points to transform, and M is the
        number of dimensions. This then returns the (x, y) pixel
        coordinates as an Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions
"""
@abc.abstractmethod
def transform(self, pixel):
"""
        Transform pixel to world coordinates. You should pass in an Nx2
        array of (x, y) pixel coordinates to transform to world
        coordinates. This will then return an NxM array where M is the
        number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
|
ba0f50b3509ec23a13f1785e62edd84533c46e37d894f6b45a8e7a7b653cd829 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib import rcParams
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .frame import RectangularFrame1D, EllipticalFrame
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
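# For example, wrap_angle_at(np.array([370., -10.]), 360.) gives
# array([ 10., 350.]): values are mapped into the half-open interval
# [coord_wrap - 360, coord_wrap), i.e. [0, 360) here, or [-180, 180)
# for coord_wrap=180.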
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None, default_label=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ''
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(rcParams['xtick.minor.visible'])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': LINES_TO_PATCHES_LINESTYLE[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams['grid.alpha'],
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == 'lines' and not self.transform.has_inverse:
raise ValueError('The specified transform has no inverse, so the '
'grid cannot be drawn using grid_type=\'lines\'')
if grid_type is None:
grid_type = 'lines' if self.transform.has_inverse else 'contours'
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
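    # An illustrative call on a WCSAxes instance ``ax`` (a hypothetical
    # variable name):
    #
    #     ax.coords[0].grid(color='yellow', alpha=0.5, grid_type='contours')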
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self._format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
            depending on whether Matplotlib is using LaTeX or MathText. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
        if self._formatter_locator.__class__ is not AngleFormatterLocator:
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, direction=None,
exclude_overlapping=None):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ('in', 'out'):
self.ticks.set_tick_out(direction == 'out')
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn("exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
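    # An illustrative call on a WCSAxes instance ``ax`` (hypothetical):
    #
    #     lon = ax.coords[0]
    #     lon.set_ticks(spacing=30. * u.deg, color='white', direction='in')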
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, color=None, size=None, pad=None,
exclude_overlapping=None, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
            tick labels to be shown on the left and bottom axes.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop('fontdict', None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
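    # For example (``ax`` being a hypothetical WCSAxes instance):
    #
    #     ax.coords[1].set_axislabel('Declination (J2000)', minpad=0.8)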
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
        Set whether default axis labels are rendered if no explicit label
        is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
        Return whether default axis labels will be rendered if no explicit
        label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ('longitude', 'latitude'):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
            axis label to be shown on the left and bottom axes.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
    def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group('ticks')
self.ticks.draw(renderer)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
                pixel0 = spine.data
                with np.errstate(invalid='ignore'):
                    world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid='ignore'):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid='ignore'):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
else:
rotation = 90 if axis == 'b' else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel['b']]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, '_grid') and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res),
np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
with np.errstate(invalid='ignore'):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid='ignore'):
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
def tick_params(self, which='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if 'colors' in kwargs:
if 'color' not in kwargs:
kwargs['color'] = kwargs['colors']
if 'labelcolor' not in kwargs:
kwargs['labelcolor'] = kwargs['colors']
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == 'minor':
if len(set(kwargs) - {'length'}) > 0:
raise ValueError("When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)")
else:
if 'length' in kwargs:
self.ticks.set_minor_ticksize(kwargs['length'])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(size=kwargs.get('length'),
width=kwargs.get('width'),
color=kwargs.get('color'),
direction=kwargs.get('direction'))
# Set the tick position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if arg in kwargs and position is None:
position = ''
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(color=kwargs.get('labelcolor'),
size=kwargs.get('labelsize'),
pad=kwargs.get('pad'))
# Set the tick label position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if 'label' + arg in kwargs and position is None:
position = ''
if kwargs.get('label' + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if 'grid_color' in kwargs:
self.grid_lines_kwargs['edgecolor'] = kwargs['grid_color']
if 'grid_alpha' in kwargs:
self.grid_lines_kwargs['alpha'] = kwargs['grid_alpha']
if 'grid_linewidth' in kwargs:
self.grid_lines_kwargs['linewidth'] = kwargs['grid_linewidth']
if 'grid_linestyle' in kwargs:
if kwargs['grid_linestyle'] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs['linestyle'] = LINES_TO_PATCHES_LINESTYLE[kwargs['grid_linestyle']]
else:
self.grid_lines_kwargs['linestyle'] = kwargs['grid_linestyle']
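    # An illustrative call mirroring Matplotlib's tick_params (``ax`` is a
    # hypothetical WCSAxes instance):
    #
    #     ax.coords[0].tick_params(direction='out', length=6, colors='white',
    #                              grid_color='yellow', grid_alpha=0.5)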
|
1b60d38f4e5ef97935d24efee0f33ec9b3c41dbef1ae2386215a472927c50869 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.units import UnitsError
from astropy.coordinates import Angle
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit('custom_degree', represents=u.degree,
format={'generic': '\xb0',
'latex': r'^\circ',
'unicode': '°'}),
u.arcmin: u.def_unit('custom_arcmin', represents=u.arcmin,
format={'generic': "'",
'latex': r'^\prime',
'unicode': '′'}),
u.arcsec: u.def_unit('custom_arcsec', represents=u.arcsec,
format={'generic': '"',
'latex': r'^{\prime\prime}',
'unicode': '″'}),
u.hourangle: u.def_unit('custom_hourangle', represents=u.hourangle,
format={'generic': 'h',
'latex': r'^{\mathrm{h}}',
'unicode': r'$\mathregular{^h}$'})}
class BaseFormatterLocator:
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specified")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
        if not isinstance(values, u.Quantity) or values.ndim != 1:
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError("value should be in units compatible with "
"coordinate units ({}) but found {}".format(self._unit, values.unit))
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
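    # For example, spacing=1*u.deg with frequency=5 over a 0 to 2 deg range
    # yields candidate values every 0.2 deg; the candidates that coincide
    # with major ticks are deleted, leaving the eight minor tick values
    # 0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6 and 1.8 deg.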
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
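# For example, BaseFormatterLocator._locate_values(0.5, 10.2, 2.) returns
# array([1, 2, 3, 4, 5]); multiplied by the spacing, this places ticks at
# 2, 4, 6, 8 and 10.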
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, decimal=None, format_unit=None, show_decimal_unit=True):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(values=values, number=number, spacing=spacing,
format=format, unit=unit, format_unit=format_unit)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError(f"Invalid format: {value}")
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
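    # Illustrative sketch of the format parsing above (not part of the
    # original module): setting ``format = 'dd:mm'`` selects sexagesimal
    # degrees with two fields and zero precision, while ``'dd:mm:ss.ss'``
    # selects three fields with a precision of two decimal places on the
    # seconds.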
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
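    # Illustrative examples (assuming the format has been set as above):
    # 'dd:mm' -> 1 arcmin, 'dd:mm:ss' -> 1 arcsec, 'd.dd' -> 0.01 deg, and
    # 'hh:mm:ss' -> 15 arcsec (i.e. 1 second of hour angle).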
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced). We return a
# non-zero spacing in case the caller needs to format a single
            # coordinate, e.g. for mouseover.
if value_min == value_max:
return [] * self._unit, 1 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
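    # Illustrative sketch (not part of the original module): with a manually
    # specified spacing the locator simply returns multiples of it, e.g.
    #
    #     >>> fl = AngleFormatterLocator(spacing=5 * u.degree)
    #     >>> fl.locator(-12, 12)   # doctest: +SKIP
    #     (<Quantity [-10., -5., 0., 5., 10.] deg>, <Quantity 5. deg>)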
def formatter(self, values, spacing, format='auto'):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
spacing = spacing.to_value(unit)
fields = 0
precision = len(f"{spacing:.10f}".replace('0', ' ').strip().split('.', 1)[1])
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == 'latex' or (format == 'auto' and rcParams['text.usetex'])
if decimal:
# At the moment, the Angle class doesn't have a consistent way
# to always convert angles to strings in decimal form with
                # symbols for units (instead of e.g. 3arcsec). So as a workaround
# we take advantage of the fact that Angle.to_string converts
# the unit to a string manually when decimal=False and the unit
# is not strictly u.degree or u.hourangle
if self.show_decimal_unit:
decimal = False
sep = 'fromunit'
if is_latex:
fmt = 'latex'
else:
if unit is u.hourangle:
fmt = 'unicode'
else:
fmt = None
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = None
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = 'fromunit'
if unit == u.degree:
if is_latex:
fmt = 'latex'
else:
sep = ('\xb0', "'", '"')
fmt = None
else:
if format == 'ascii':
fmt = None
elif is_latex:
fmt = 'latex'
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
fmt = None
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt).tolist()
return string
else:
return []
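    # Illustrative sketch (not part of the original module): with an explicit
    # format the precision comes from the format rather than the spacing, e.g.
    #
    #     >>> fl = AngleFormatterLocator()
    #     >>> fl.format = 'dd:mm:ss'
    #     >>> fl.formatter([15.5] * u.degree, None)   # doctest: +SKIP
    #     ['15°30'00"']   # exact separators depend on the output format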
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
        if unit is not None:
            format_unit = format_unit or unit
elif spacing is not None:
unit = spacing.unit
format_unit = format_unit or spacing.unit
elif values is not None:
unit = values.unit
format_unit = format_unit or values.unit
super().__init__(values=values, number=number, spacing=spacing,
format=format, unit=unit, format_unit=format_unit)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError(f"Invalid format: {value}")
@property
def base_spacing(self):
return self._format_unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format='auto'):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
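    # Illustrative sketch (not part of the original module): a printf-style
    # format is applied directly to the raw values, e.g.
    #
    #     >>> sl = ScalarFormatterLocator(unit=u.m)
    #     >>> sl.format = '%.2f'
    #     >>> sl.formatter([1.234] * u.m, None)   # doctest: +SKIP
    #     ['1.23']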
|
3e89d3446bfc67e8d8db7b867587d1466869c794fbe7b789ac5a54383c21bd79 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import SkyCoord, BaseCoordinateFrame
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .transforms import CoordinateTransform
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import RectangularFrame, RectangularFrame1D
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=None,
**kwargs):
"""
"""
super().__init__(fig, rect, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif (wcs is not None and (wcs.pixel_n_dim == 1 or
(slices is not None and 'y' not in slices))):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(coord.format_coord(world[coord.coord_index], format='ascii'))
coord_string = ' '.join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop('origin', 'lower')
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = 'lower'
elif origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(native_frame)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
return super().plot(*args, **kwargs)
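    # Illustrative usage (not part of the original module), assuming ``ax``
    # is a WCSAxes instance with a celestial WCS:
    #
    #     >>> c = SkyCoord(10, 20, unit='deg', frame='icrs')   # doctest: +SKIP
    #     >>> ax.plot_coord(c, 'o')                            # doctest: +SKIP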
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(self.wcs, self.frame_class, slices=slices)
self.coords = CoordinatesMap(self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
self._transform_pixel2world = transform
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(coord_meta.get('default_axislabel_position', ['b', 'l'])):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticklabel_position', ['b', 'l'])):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticks_position', ['bltr', 'bltr'])):
self.coords[ind].set_ticks_position(pos)
if rcParams['axes.grid']:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
# We need to make sure that that frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop('label', None)
if xlabel is None:
raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'")
for coord in self.coords:
if ('b' in coord.axislabels.get_visible_axes() or
'h' in coord.axislabels.get_visible_axes()):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label"""
if ylabel is None:
ylabel = kwargs.pop('label', None)
if ylabel is None:
raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'")
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if ('l' in coord.axislabels.get_visible_axes() or
'c' in coord.axislabels.get_visible_axes()):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if ('b' in coord.axislabels.get_visible_axes() or
'h' in coord.axislabels.get_visible_axes()):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if ('l' in coord.axislabels.get_visible_axes() or
'c' in coord.axislabels.get_visible_axes()):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
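    # Illustrative usage (not part of the original module): overplot data
    # expressed in Galactic coordinates on axes whose WCS is, say, ICRS:
    #
    #     >>> ax.plot(l, b, transform=ax.get_transform('galactic'))  # doctest: +SKIP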
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (self._transform_pixel2world +
CoordinateTransform(self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in) +
transform_world2pixel)
elif isinstance(frame, str) and frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == 'world':
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(self._transform_pixel2world.frame_out, frame)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
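    # Illustrative usage (not part of the original module):
    #
    #     >>> ax.grid(color='white', ls='dotted')   # doctest: +SKIP
    #     >>> ax.grid(axis='x', color='yellow')     # doctest: +SKIP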
def tick_params(self, axis='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, 'coords'):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == 'both':
for pos in ('bottom', 'left', 'top', 'right'):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if 'label' + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ('x', 'y') and self.frame_class is RectangularFrame:
spine = 'b' if axis == 'x' else 'l'
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
|
f505a77e9853d664495e26acf71fcb111b5f32f48d7a5f72c26a40cd20599ff6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# The following few lines skip this module when running tests if matplotlib is
# not available (and will have no impact otherwise)
try:
import pytest
pytest.importorskip("matplotlib")
del pytest
except ImportError:
pass
from .core import *
from .coordinate_helpers import CoordinateHelper
from .coordinates_map import CoordinatesMap
from .patches import *
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.visualization.wcsaxes`.
"""
coordinate_range_samples = _config.ConfigItem(50,
'The number of samples along each image axis when determining '
'the range of coordinates in a plot.')
frame_boundary_samples = _config.ConfigItem(1000,
'How many points to sample along the axes when determining '
'tick locations.')
grid_samples = _config.ConfigItem(1000,
'How many points to sample along grid lines.')
contour_grid_samples = _config.ConfigItem(200,
'The grid size to use when drawing a grid using contours')
conf = Conf()
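# Illustrative usage (not part of the original module): configuration items
# can be changed globally or temporarily, e.g.
#
#     >>> from astropy.visualization.wcsaxes import conf       # doctest: +SKIP
#     >>> conf.grid_samples = 2000                             # doctest: +SKIP
#     >>> with conf.set_temp('frame_boundary_samples', 500):   # doctest: +SKIP
#     ...     pass  # draw a plot with coarser frame sampling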
|
bf648f33c8c90e2afc3585194c14667ffc1cd04ba9e9913f493ac8bbc1be2350 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
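# Illustrative example (not part of the original module):
# sort_using(['a', 'b', 'c'], [3, 1, 2]) returns ['b', 'c', 'a'], i.e. X
# reordered by the sort order of Y.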
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_pad(rcParams['xtick.major.pad'])
self._exclude_overlapping = False
# Stale if either xy positions haven't been calculated, or if
# something changes that requires recomputing the positions
self._stale = True
# Check rcParams
if 'color' not in kwargs:
self.set_color(rcParams['xtick.color'])
if 'size' not in kwargs:
self.set_size(rcParams['xtick.labelsize'])
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
self._stale = True
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
self._stale = True
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
# Remove any empty LaTeX inline math mode string
if self.text[axis][i] == '$$':
self.text[axis][i] = ''
self._stale = True
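    # Worked example (illustrative): for consecutive labels '13d14m15s' and
    # '13d14m20s' the common prefix up to the last non-numeric separator
    # ('13d14m') is dropped from the second label, which becomes '20s'; the
    # final character is always kept so identical labels are not emptied.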
def set_pad(self, value):
self._pad = value
self._stale = True
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
self._stale = True
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def _set_xy_alignments(self, renderer, tick_out_size):
"""
Compute and set the x, y positions and the horizontal/vertical alignment of
each label.
"""
if not self._stale:
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
visible_axes = self.get_visible_axes()
self.xy = {axis: {} for axis in visible_axes}
self.ha = {axis: {} for axis in visible_axes}
self.va = {axis: {} for axis in visible_axes}
for axis in visible_axes:
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
x, y = self.pixel[axis][i]
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = pad
dy = -text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = pad
x = x + dx
y = y + dy
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_text(self.text[axis][i])
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
x = x - dx
y = y - dy
ha = 'center'
va = 'center'
self.xy[axis][i] = (x, y)
self.ha[axis][i] = ha
self.va[axis][i] = va
self._stale = False
def _get_bb(self, axis, i, renderer):
"""
        Get the bounding box of an individual label. n.b. _set_xy_alignments()
must be called before this method.
"""
if self.text[axis][i] == '':
return
self.set_text(self.text[axis][i])
self.set_position(self.xy[axis][i])
self.set_ha(self.ha[axis][i])
self.set_va(self.va[axis][i])
return super().get_window_extent(renderer)
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self._set_xy_alignments(renderer, tick_out_size)
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# This implicitly sets the label text, position, alignment
bb = self._get_bb(axis, i, renderer)
if bb is None:
continue
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
|
4c99881e82a13c200179c349f8b5d4c81162f5e1c494d87446b9be42605c389b | # Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BaseCoordinateFrame
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import RectangularFrame, EllipticalFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = ['transform_coord_meta_from_wcs', 'WCSWorld2PixelTransform',
'WCSPixel2WorldTransform']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
if slices is not None:
slices = tuple(slices)
if wcs.pixel_n_dim > 2:
if slices is None:
raise ValueError("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
elif len(slices) != wcs.pixel_n_dim:
raise ValueError("'slices' should have as many elements as WCS "
"has pixel dimensions (should be {})"
.format(wcs.pixel_n_dim))
is_fits_wcs = isinstance(wcs, WCS) or (isinstance(wcs, SlicedLowLevelWCS) and isinstance(wcs._wcs, WCS))
coord_meta = {}
coord_meta['name'] = []
coord_meta['type'] = []
coord_meta['wrap'] = []
coord_meta['unit'] = []
coord_meta['visible'] = []
coord_meta['format_unit'] = []
for idx in range(wcs.world_n_dim):
axis_type = wcs.world_axis_physical_types[idx]
axis_unit = u.Unit(wcs.world_axis_units[idx])
coord_wrap = None
format_unit = axis_unit
coord_type = 'scalar'
if axis_type is not None:
axis_type_split = axis_type.split('.')
if "pos.helioprojective.lon" in axis_type:
coord_wrap = 180.
format_unit = u.arcsec
coord_type = "longitude"
elif "pos.helioprojective.lat" in axis_type:
format_unit = u.arcsec
coord_type = "latitude"
elif "pos.heliographic.stonyhurst.lon" in axis_type:
coord_wrap = 180.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.stonyhurst.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos.heliographic.carrington.lon" in axis_type:
coord_wrap = 360.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.carrington.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos" in axis_type_split:
if "lon" in axis_type_split:
coord_type = "longitude"
elif "lat" in axis_type_split:
coord_type = "latitude"
elif "ra" in axis_type_split:
coord_type = "longitude"
format_unit = u.hourangle
elif "dec" in axis_type_split:
coord_type = "latitude"
elif "alt" in axis_type_split:
coord_type = "longitude"
elif "az" in axis_type_split:
coord_type = "latitude"
elif "long" in axis_type_split:
coord_type = "longitude"
coord_meta['type'].append(coord_type)
coord_meta['wrap'].append(coord_wrap)
coord_meta['format_unit'].append(format_unit)
coord_meta['unit'].append(axis_unit)
# For FITS-WCS, for backward-compatibility, we need to make sure that we
# provide aliases based on CTYPE for the name.
if is_fits_wcs:
name = []
if isinstance(wcs, WCS):
name.append(wcs.wcs.ctype[idx].lower())
name.append(wcs.wcs.ctype[idx][:4].replace('-', '').lower())
elif isinstance(wcs, SlicedLowLevelWCS):
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4].replace('-', '').lower())
if name[0] == name[1]:
name = name[0:1]
if axis_type:
if axis_type not in name:
name.insert(0, axis_type)
if wcs.world_axis_names and wcs.world_axis_names[idx]:
if wcs.world_axis_names[idx] not in name:
name.append(wcs.world_axis_names[idx])
name = tuple(name) if len(name) > 1 else name[0]
else:
name = axis_type or ''
if wcs.world_axis_names:
name = (name, wcs.world_axis_names[idx]) if wcs.world_axis_names[idx] else name
coord_meta['name'].append(name)
coord_meta['default_axislabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticklabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticks_position'] = [''] * wcs.world_n_dim
# If the world axis has a name use it, else display the world axis physical type.
fallback_labels = [name[0] if isinstance(name, (list, tuple)) else name for name in coord_meta['name']]
coord_meta['default_axis_label'] = [wcs.world_axis_names[i] or fallback_label for i, fallback_label in enumerate(fallback_labels)]
transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)
for i in range(len(coord_meta['type'])):
coord_meta['visible'].append(i in world_map)
inv_all_corr = [False] * wcs.world_n_dim
m = transform_wcs.axis_correlation_matrix.copy()
if invert_xy:
inv_all_corr = np.all(m, axis=1)
m = m[:, ::-1]
if frame_class is RectangularFrame:
for i, spine_name in enumerate('bltr'):
pos = np.nonzero(m[:, i % 2])[0]
# If all the axes we have are correlated with each other and we
# have inverted the axes, then we need to reverse the index so we
# put the 'y' on the left.
if inv_all_corr[i % 2]:
pos = pos[::-1]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 2:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bltr'
elif frame_class is RectangularFrame1D:
derivs = np.abs(local_partial_pixel_derivatives(transform_wcs, *[0]*transform_wcs.pixel_n_dim,
normalize_by_world=False))[:, 0]
for i, spine_name in enumerate('bt'):
# Here we are iterating over the correlated axes in world axis order.
# We want to sort the correlated axes by their partial derivatives,
# so we put the most rapidly changing world axis on the bottom.
pos = np.nonzero(m[:, 0])[0]
order = np.argsort(derivs[pos])[::-1] # Sort largest to smallest
pos = pos[order]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 1:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bt'
elif frame_class is EllipticalFrame:
if 'longitude' in coord_meta['type']:
lon_idx = coord_meta['type'].index('longitude')
coord_meta['default_axislabel_position'][lon_idx] = 'h'
coord_meta['default_ticklabel_position'][lon_idx] = 'h'
coord_meta['default_ticks_position'][lon_idx] = 'h'
if 'latitude' in coord_meta['type']:
lat_idx = coord_meta['type'].index('latitude')
coord_meta['default_axislabel_position'][lat_idx] = 'c'
coord_meta['default_ticklabel_position'][lat_idx] = 'c'
coord_meta['default_ticks_position'][lat_idx] = 'c'
else:
for index in range(len(coord_meta['type'])):
if index in world_map:
coord_meta['default_axislabel_position'][index] = frame_class.spine_names
coord_meta['default_ticklabel_position'][index] = frame_class.spine_names
coord_meta['default_ticks_position'][index] = frame_class.spine_names
return transform, coord_meta
def apply_slices(wcs, slices):
"""
Take the input WCS and slices and return a sliced WCS for the transform and
a mapping of world axes in the sliced WCS to the input WCS.
"""
if isinstance(wcs, SlicedLowLevelWCS):
world_keep = list(wcs._world_keep)
else:
world_keep = list(range(wcs.world_n_dim))
# world_map is the index of the world axis in the input WCS for a given
# axis in the transform_wcs
world_map = list(range(wcs.world_n_dim))
transform_wcs = wcs
invert_xy = False
if slices is not None:
wcs_slice = list(slices)
wcs_slice[wcs_slice.index("x")] = slice(None)
if 'y' in slices:
wcs_slice[wcs_slice.index("y")] = slice(None)
invert_xy = slices.index('x') > slices.index('y')
transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1])
world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)
return transform_wcs, invert_xy, world_map
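# Illustrative sketch (not part of the original module): for a WCS with three
# pixel dimensions, slices=(50, 'x', 'y') fixes the first pixel dimension at
# index 50 and keeps the other two; the slice list is reversed before being
# passed to SlicedLowLevelWCS because ``slices`` is given in WCS (x-first)
# order while SlicedLowLevelWCS expects numpy order. invert_xy is True only
# when 'x' appears after 'y' (e.g. slices=('y', 'x', 50)).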
def wcsapi_to_celestial_frame(wcs):
for cls, _, kwargs, *_ in wcs.world_axis_object_classes.values():
if issubclass(cls, SkyCoord):
return kwargs.get('frame', ICRS())
elif issubclass(cls, BaseCoordinateFrame):
return cls(**kwargs)
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
            raise ValueError('Only pixel_n_dim <= 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_in = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def input_dims(self):
return self.wcs.world_n_dim
def transform(self, world):
# Convert to a list of arrays
world = list(world.T)
if len(world) != self.wcs.world_n_dim:
raise ValueError(f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} ")
if len(world[0]) == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.world_to_pixel_values(*world)
if self.invert_xy:
pixel = pixel[::-1]
pixel = np.array(pixel).T
return pixel
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
            raise ValueError('Only pixel_n_dim <= 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_out = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def output_dims(self):
return self.wcs.world_n_dim
def transform(self, pixel):
# Convert to a list of arrays
pixel = list(pixel.T)
if len(pixel) != self.wcs.pixel_n_dim:
raise ValueError(f"Expected {self.wcs.pixel_n_dim} world coordinates, got {len(pixel)} ")
if self.invert_xy:
pixel = pixel[::-1]
if len(pixel[0]) == 0:
world = np.zeros((0, self.wcs.world_n_dim))
else:
world = self.wcs.pixel_to_world_values(*pixel)
if self.wcs.world_n_dim == 1:
world = [world]
world = np.array(world).T
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
|
bfa24e119b1e6423c2dedbef759b4fc64aeb74efe6bbb3a83fba481b0ea6dc78 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
import numpy as np
from matplotlib.lines import Path, Line2D
from matplotlib.transforms import Affine2D
from matplotlib import rcParams
class Ticks(Line2D):
"""
    Ticks are derived from Line2D, and the ticks themselves are markers,
    so you should use set_mec, set_mew, etc. to style them.
    To change the tick size (length), use set_ticksize. To change the
    direction of the ticks (by default they point in the opposite
    direction to the tick labels), use set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
Attributes
----------
ticks_locs : dict
This is set when the ticks are drawn, and is a mapping from axis to
the locations of the ticks for that axis.
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams['xtick.major.size']
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams['xtick.minor.size'])
self.set_tick_out(rcParams['xtick.direction'] == 'out')
self.clear()
line2d_kwargs = {'color': rcParams['xtick.color'],
'linewidth': rcParams['xtick.major.width']}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.], [0.], **line2d_kwargs)
self.set_visible_axes('all')
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
        Set to True if the ticks should be rotated by 180 degrees.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
        Return True if the ticks will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
        Set the length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
        Return the length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
        Set the length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
        Return the length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.disp = {}
self.minor_world = {}
self.minor_pixel = {}
self.minor_angle = {}
self.minor_disp = {}
def add(self, axis, world, pixel, angle, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle,
minor_axis_displacement):
if minor_axis not in self.minor_world:
self.minor_world[minor_axis] = [minor_world]
self.minor_pixel[minor_axis] = [minor_pixel]
self.minor_angle[minor_axis] = [minor_angle]
self.minor_disp[minor_axis] = [minor_axis_displacement]
else:
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer):
"""
Draw the ticks.
"""
self.ticks_locs = defaultdict(list)
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset):
"""
Draw the minor ticks.
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180. if self.get_tick_out() else 0.
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
self.ticks_locs[axis].append(locs)
gc.restore()
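# --- Hedged usage sketch (added for illustration; not part of the original
# module). Exercises the configuration surface described in the class
# docstring; drawing needs a renderer, so this only sets and reads state.
if __name__ == '__main__':
    ticks = Ticks(ticksize=8)
    ticks.set_tick_out(True)   # rotate ticks to point away from the frame
    ticks.set_color('red')     # Line2D marker setters apply, per the docstring
    ticks.add(axis='b', world=0., pixel=(10., 20.), angle=90.,
              axis_displacement=0.)
    print(len(ticks), ticks.get_ticksize(), ticks.get_tick_out())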
|
5b5ef628457632729948630d8e99ef3ddfdfbf0b9f6c7f52fbc6fe39be4587c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import warnings
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.representation import UnitSphericalRepresentation, SphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Quadrangle', 'SphericalCircle']
# Monkey-patch the docs to fix CapStyle and JoinStyle subs.
# TODO: delete when upstream fixes matplotlib/matplotlib#19839
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
Polygon.set_capstyle.__doc__ = Polygon.set_capstyle.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.set_joinstyle.__doc__ = Polygon.set_joinstyle.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to +90.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements
or a `~astropy.coordinates.SkyCoord` object.
radius : `~astropy.units.Quantity` ['angle']
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a SkyCoord object, or
# from a tuple of two quantities or a single 2-element Quantity.
# The SkyCoord is converted to SphericalRepresentation, if not already.
if isinstance(center, SkyCoord):
rep_type = center.representation_type
if not issubclass(rep_type, (SphericalRepresentation,
UnitSphericalRepresentation)):
                warnings.warn(f'Received `center` of representation type {rep_type}; '
                              'it will be converted to SphericalRepresentation.',
                              AstropyUserWarning)
longitude, latitude = center.spherical.lon, center.spherical.lat
else:
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
class Quadrangle(Polygon):
"""
Create a patch representing a latitude-longitude quadrangle.
The edges of the quadrangle lie on two lines of constant longitude and two
lines of constant latitude (or the equivalent component names in the
coordinate frame of interest, such as right ascension and declination).
Note that lines of constant latitude are not great circles.
Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render
as curved lines if appropriate for the WCS transformation.
Parameters
----------
anchor : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
width : `~astropy.units.Quantity` ['angle']
The width of the quadrangle in longitude (or, e.g., right ascension)
height : `~astropy.units.Quantity` ['angle']
The height of the quadrangle in latitude (or, e.g., declination)
resolution : int, optional
The number of points that make up each side of the quadrangle -
increase this to get a smoother quadrangle.
vertex_unit : `~astropy.units.Unit` ['angle']
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit)
# Convert the quadrangle dimensions to the appropriate units
width = width.to_value(vertex_unit)
height = height.to_value(vertex_unit)
# Create progressions in longitude and latitude
lon_seq = longitude + np.linspace(0, width, resolution + 1)
lat_seq = latitude + np.linspace(0, height, resolution + 1)
# Trace the path of the quadrangle
lon = np.concatenate([lon_seq[:-1],
np.repeat(lon_seq[-1], resolution),
np.flip(lon_seq[1:]),
np.repeat(lon_seq[0], resolution)])
lat = np.concatenate([np.repeat(lat_seq[0], resolution),
lat_seq[:-1],
np.repeat(lat_seq[-1], resolution),
np.flip(lat_seq[1:])])
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
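# --- Hedged usage sketch (added for illustration; not part of the original
# module). Adds both patches to a plain Matplotlib Axes; on a WCSAxes you
# would typically also pass an angular transform such as
# ax.get_transform('icrs').
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    circle = SphericalCircle((30. * u.deg, 45. * u.deg), 5. * u.deg,
                             edgecolor='red', facecolor='none')
    quad = Quadrangle((20., 40.) * u.deg, 10. * u.deg, 6. * u.deg,
                      edgecolor='blue', facecolor='none')
    ax.add_patch(circle)
    ax.add_patch(quad)
    ax.set_xlim(10, 50)
    ax.set_ylim(30, 60)
    fig.savefig('patches_demo.png')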
|
379c2fa627a75eaff9fc056a5b7059a438867a59a664d6b9251403972e02637d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from textwrap import indent
from collections import OrderedDict
from .coordinate_helpers import CoordinateHelper
from .frame import RectangularFrame, RectangularFrame1D
from .coordinate_range import find_coordinate_range
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data.
coord_meta : dict, optional
A dictionary providing additional metadata. This should include the keys
``type``, ``wrap``, and ``unit``. Each of these should be a list with as
many items as the dimension of the coordinate system. The ``type``
entries should be one of ``longitude``, ``latitude``, or ``scalar``, the
``wrap`` entries should give, for the longitude, the angle at which the
coordinate wraps (and `None` otherwise), and the ``unit`` should give
the unit of the coordinates as :class:`~astropy.units.Unit` instances.
This can optionally also include a ``format_unit`` entry giving the
units to use for the tick labels (if not specified, this defaults to
``unit``).
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(self, axes, transform=None, coord_meta=None,
frame_class=RectangularFrame, previous_frame_path=None):
self._axes = axes
self._transform = transform
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
visible_count = 0
for index in range(len(coord_meta['type'])):
# Extract coordinate metadata
coord_type = coord_meta['type'][index]
coord_wrap = coord_meta['wrap'][index]
coord_unit = coord_meta['unit'][index]
name = coord_meta['name'][index]
visible = True
if 'visible' in coord_meta:
visible = coord_meta['visible'][index]
format_unit = None
if 'format_unit' in coord_meta:
format_unit = coord_meta['format_unit'][index]
default_label = name[0] if isinstance(name, (tuple, list)) else name
if 'default_axis_label' in coord_meta:
default_label = coord_meta['default_axis_label'][index]
coord_index = None
if visible:
visible_count += 1
coord_index = visible_count - 1
self._coords.append(CoordinateHelper(parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame,
default_label=default_label))
# Set up aliases for coordinates
if isinstance(name, tuple):
for nm in name:
nm = nm.lower()
# Do not replace an alias already in the map if we have
# more than one alias for this axis.
if nm not in self._aliases:
self._aliases[nm] = index
else:
self._aliases[name.lower()] = index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
def __contains__(self, item):
if isinstance(item, str):
return item.lower() in self._aliases
else:
return 0 <= item < len(self._coords)
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
for coord in self._coords:
yield coord
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
if isinstance(self.frame, RectangularFrame1D):
extent = [xmin, xmax]
else:
ymin, ymax = self._axes.get_ylim()
extent = [xmin, xmax, ymin, ymax]
return find_coordinate_range(self._transform,
extent,
[coord.coord_type for coord in self if coord.coord_index is not None],
[coord.coord_unit for coord in self if coord.coord_index is not None],
[coord.coord_wrap for coord in self if coord.coord_index is not None])
def _as_table(self):
# Import Table here to avoid importing the astropy.table package
# every time astropy.visualization.wcsaxes is imported.
from astropy.table import Table # noqa
rows = []
for icoord, coord in enumerate(self._coords):
aliases = [key for key, value in self._aliases.items() if value == icoord]
row = OrderedDict([('index', icoord), ('aliases', ' '.join(aliases)),
('type', coord.coord_type), ('unit', coord.coord_unit),
('wrap', coord.coord_wrap), ('format_unit', coord.get_format_unit()),
('visible', 'no' if coord.coord_index is None else 'yes')])
rows.append(row)
return Table(rows=rows)
def __repr__(self):
s = f'<CoordinatesMap with {len(self._coords)} world coordinates:\n\n'
table = indent(str(self._as_table()), ' ')
return s + table + '\n\n>'
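# --- Hedged usage sketch (added for illustration; not part of the original
# module). A CoordinatesMap is normally created for you as the ``coords``
# attribute of a WCSAxes; the throwaway WCS below is an assumption for the
# demo.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from astropy.wcs import WCS
    demo_wcs = WCS(naxis=2)
    demo_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=demo_wcs)
    coords = ax.coords                            # a CoordinatesMap
    coords[0].set_axislabel('Right Ascension')    # list-like access by index
    coords['dec'].set_axislabel('Declination')    # dict-like access by alias
    print('ra' in coords)
    print(coords)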
|
14f3ef92412fe75f6133369f822f80abd6eed6466d8ab4c70f014bba4451ed10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import BaseCoordinateFrame
__all__ = ['select_step_degree', 'select_step_hour', 'select_step_scalar',
'transform_contour_set_inplace']
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1. * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
def select_step_hour(dv):
if dv > 15. * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [15. * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [15. * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10. ** (base + steps[imin])
def get_coord_meta(frame):
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (None, None)
coord_meta['unit'] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError(f"Unknown frame: {initial_frame}")
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta['name'] = names[:2]
return coord_meta
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
:class:`matplotlib.transform.Transform`
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
# The contours are represented as paths grouped into levels. Each can have
# one or more paths. The approach we take here is to stack the vertices of
# all paths and transform them in one go. The pos_level list helps us keep
# track of where the set of segments for each overall contour level ends.
    # The pos_segments list helps us keep track of where each segment ends for
# each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
if len(paths) == 0:
continue
all_paths.append(paths)
# The last item in pos isn't needed for np.split and in fact causes
# issues if we keep it because it will cause an extra empty array to be
# returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
vertices = [path.vertices for paths in all_paths for path in paths]
if len(vertices) > 0:
vertices = np.concatenate(vertices)
else:
return
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
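# --- Hedged usage sketch (added for illustration; not part of the original
# module). select_step_scalar snaps a raw spacing onto the nearest 1/2/5/10
# step in log space, which is what the angular helpers above fall back to.
if __name__ == '__main__':
    for dv in (0.03, 0.3, 4., 170.):
        print(dv, '->', select_step_scalar(dv))   # 0.02, 0.2, 5.0, 200.0
    print(select_step_degree(2. * u.arcmin))      # -> 2 arcmin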
|
c1fe7e28d4c63558c1fbc1196f3078f89ef08bf8b928848e7e22f86cb24d14c9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
import matplotlib.transforms as mtransforms
from .frame import RectangularFrame
class AxisLabels(Text):
def __init__(self, frame, minpad=1, *args, **kwargs):
# Use rcParams if the following parameters were not specified explicitly
if 'weight' not in kwargs:
kwargs['weight'] = rcParams['axes.labelweight']
if 'size' not in kwargs:
kwargs['size'] = rcParams['axes.labelsize']
if 'color' not in kwargs:
kwargs['color'] = rcParams['axes.labelcolor']
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_ha('center')
self.set_va('center')
self._minpad = minpad
self._visibility_rule = 'labels'
def get_minpad(self, axis):
try:
return self._minpad[axis]
except TypeError:
return self._minpad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self._frame.keys()
else:
return [x for x in self._visible_axes if x in self._frame]
def set_minpad(self, minpad):
self._minpad = minpad
def set_visibility_rule(self, value):
allowed = ['always', 'labels', 'ticks']
if value not in allowed:
raise ValueError(f"Axis label visibility rule must be one of{' / '.join(allowed)}")
self._visibility_rule = value
def get_visibility_rule(self):
return self._visibility_rule
def draw(self, renderer, bboxes, ticklabels_bbox,
coord_ticklabels_bbox, ticks_locs, visible_ticks):
if not self.get_visible():
return
text_size = renderer.points_to_pixels(self.get_size())
# Flatten the bboxes for all coords and all axes
ticklabels_bbox_list = []
for bbcoord in ticklabels_bbox.values():
for bbaxis in bbcoord.values():
ticklabels_bbox_list += bbaxis
for axis in self.get_visible_axes():
if self.get_visibility_rule() == 'ticks':
if not ticks_locs[axis]:
continue
elif self.get_visibility_rule() == 'labels':
if not coord_ticklabels_bbox:
continue
padding = text_size * self.get_minpad(axis)
# Find position of the axis label. For now we pick the mid-point
            # along the path, but in the future we could allow this to be a
# parameter.
x, y, normal_angle = self._frame[axis]._halfway_x_y_angle()
label_angle = (normal_angle - 90.) % 360.
if 135 < label_angle < 225:
label_angle += 180
self.set_rotation(label_angle)
            # Find the label position by looking at the bounding box of the
            # tick labels and the image. The default padding is 1 times the
            # axis label font size, and can be changed via the minpad
            # parameter.
if isinstance(self._frame, RectangularFrame):
if len(ticklabels_bbox_list) > 0 and ticklabels_bbox_list[0] is not None:
coord_ticklabels_bbox[axis] = [mtransforms.Bbox.union(ticklabels_bbox_list)]
else:
coord_ticklabels_bbox[axis] = [None]
visible = axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None
if axis == 'l':
if visible:
x = coord_ticklabels_bbox[axis][0].xmin
x = x - padding
elif axis == 'r':
if visible:
x = coord_ticklabels_bbox[axis][0].x1
x = x + padding
elif axis == 'b':
if visible:
y = coord_ticklabels_bbox[axis][0].ymin
y = y - padding
elif axis == 't':
if visible:
y = coord_ticklabels_bbox[axis][0].y1
y = y + padding
else: # arbitrary axis
x = x + np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5)
y = y + np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5)
self.set_position((x, y))
super().draw(renderer)
bb = super().get_window_extent(renderer)
bboxes.append(bb)
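# --- Hedged usage sketch (added for illustration; not part of the original
# module). Only the configuration setters are exercised here; drawing needs
# a real frame and renderer, so frame=None is enough for this demo.
if __name__ == '__main__':
    labels = AxisLabels(frame=None, minpad=1.5, text='Galactic Longitude')
    labels.set_visibility_rule('ticks')   # only label axes that have ticks
    # _minpad is a scalar here, so get_minpad returns it for any axis.
    print(labels.get_visibility_rule(), labels.get_minpad('b'))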
|
6cece3229ad7b9a5274ce33bd2d0395adcd5cc2be42a82050dfb10418df305eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {'longitude', 'latitude'}
def wrap_180(values):
values_new = values % 360.
with np.errstate(invalid='ignore'):
values_new[values_new > 180.] -= 360
return values_new
def find_coordinate_range(transform, extent, coord_types, coord_units, coord_wraps):
"""
Find the range of coordinates to use for ticks/grids
Parameters
----------
    transform : `~matplotlib.transforms.Transform`
        The transform from pixel to world coordinates. Its ``transform``
        method should map an (n, 2) array of pixel coordinates (or (n, 1)
        for one-dimensional plots) to an array of world coordinates.
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
The units for each coordinate.
coord_wraps : list of float
The wrap angles for longitudes.
"""
# Sample coordinates on a NX x NY grid.
from . import conf
if len(extent) == 4:
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
with np.errstate(invalid='ignore'):
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
else:
nx = conf.coordinate_range_samples
xp = np.linspace(extent[0], extent[1], nx + 1)[None]
with np.errstate(invalid='ignore'):
world = transform.transform(xp.T)
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.)
xw_max_check = np.nanmax(xw % 360.)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if xw_max_check - xw_min_check < 360. and xw_max - xw_min >= xw_max_check - xw_min_check:
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == 'longitude':
if x_range > 300.:
xw_min = coord_wraps[coord_index] - 360
xw_max = coord_wraps[coord_index] - np.spacing(360.)
elif xw_min < 0.:
xw_min = max(-180., xw_min - 0.1 * x_range)
xw_max = min(+180., xw_max + 0.1 * x_range)
else:
xw_min = max(0., xw_min - 0.1 * x_range)
xw_max = min(360., xw_max + 0.1 * x_range)
elif coord_type == 'latitude':
xw_min = max(-90., xw_min - 0.1 * x_range)
xw_max = min(+90., xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
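# --- Hedged usage sketch (added for illustration; not part of the original
# module). wrap_180 maps angles into (-180, 180]; find_coordinate_range uses
# it to check whether a longitude range is more compact when wrapped there.
if __name__ == '__main__':
    print(wrap_180(np.array([10., 350., 181., 180.])))  # [10. -10. -179. 180.]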
|
134b3b37f3e4b53dc999ddf61bd9ddd37dc52529a50daa93edefa41ba7a3bf34 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self.data = None
self.pixel = None
self.world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = value
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
                self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
            self._pixel = value
            self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams['axes.linewidth']
self._color = rcParams['axes.edgecolor']
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor=rcParams['axes.facecolor'], edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = self.spine_class(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose()
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
class RectangularFrame1D(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'bt'
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000,
transform=self.parent_axes.transData)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
|
8d37080a565618acbfcf75aecb3c2e0956c6781bf2244ca3a8495b23204b7237 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path
from astropy.coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping, relative to the scale size
ROUND_TRIP_RTOL = 1.
# Tolerance for discontinuities relative to the median
DISCONT_FACTOR = 10.
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
Draw a curve, taking into account discontinuities.
Parameters
----------
lon_lat : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : ndarray
The world coordinates derived from converting from ``pixel``, which is
used to ensure round-tripping.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]))
# Define the relevant scale size using the separation between the first two points
scale_size = angular_separation(*np.radians(lon_lat[0, :]), *np.radians(lon_lat[1, :]))
with np.errstate(invalid='ignore'):
sep[sep > np.pi] -= 2. * np.pi
        mask = np.abs(sep) > ROUND_TRIP_RTOL * scale_size
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt((pixel[1:, 0] - pixel[:-1, 0]) ** 2 +
(pixel[1:, 1] - pixel[:-1, 1]) ** 2)
    # We search for discontinuities by looking for places where the step
    # is larger by more than a given factor compared to the previous step.
    discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
def get_gridline_path(world, pixel):
"""
Draw a grid line
Parameters
----------
world : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# Create the path
path = Path(pixel, codes=codes)
return path
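# --- Hedged usage sketch (added for illustration; not part of the original
# module). get_gridline_path lifts the pen (Path.MOVETO) wherever the pixel
# coordinates are NaN, so a gridline leaving the projection breaks cleanly
# instead of drawing a spurious connecting segment.
if __name__ == '__main__':
    world = np.array([[0., 0.], [0., 30.], [0., 60.], [0., 90.]])
    pixel = np.array([[0., 0.], [0., 1.], [np.nan, np.nan], [0., 3.]])
    path = get_gridline_path(world, pixel)
    print(path.codes)  # [MOVETO LINETO MOVETO MOVETO] -> [1 2 1 1]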
|
33a78136d0a1102b578fcbb247c89684adbd05ab4f315f237979aed7bf72b1e9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_PLT, HAS_SCIPY
if HAS_PLT:
import matplotlib.pyplot as plt
import pytest
import numpy as np
from astropy.visualization import hist
from astropy.stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott',
'freedman', 'blocks']
else:
bintypes = [10, np.arange(-3, 3, 10), 'scott',
'freedman', 'blocks']
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [9.99999914e+05, -8.31312483e-03, 6.52755852e-02, 1.43104653e-03,
-2.26311017e-02, 2.82660007e-03, 1.80307521e-02, 9.26294279e-03,
5.06606026e-02, 2.05418011e-03]
with pytest.raises(ValueError):
hist(data, bins='freedman', max_bins=10000)
|
446884651dd8b1f446274d165cb929c6da350a6f590d319b1cb26107bd1ca591 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils import NumpyRNGContext
from astropy.visualization.interval import (ManualInterval,
MinMaxInterval,
PercentileInterval,
AsymmetricPercentileInterval,
ZScaleInterval)
class TestInterval:
data = np.linspace(-20., 60., 100)
def test_manual(self):
interval = ManualInterval(-10., +15.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.)
np.testing.assert_allclose(vmax, +15.)
def test_manual_defaults(self):
interval = ManualInterval(vmin=-10.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.)
np.testing.assert_allclose(vmax, np.max(self.data))
interval = ManualInterval(vmax=15.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, np.min(self.data))
np.testing.assert_allclose(vmax, 15.)
def test_manual_zero_limit(self):
# Regression test for a bug that caused ManualInterval to compute the
# limit (min or max) if it was set to zero.
interval = ManualInterval(vmin=0, vmax=0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, 0)
np.testing.assert_allclose(vmax, 0)
def test_manual_defaults_with_nan(self):
interval = ManualInterval()
data = np.copy(self.data)
data[0] = np.nan
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -20)
np.testing.assert_allclose(vmax, +60)
def test_minmax(self):
interval = MinMaxInterval()
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -20.)
np.testing.assert_allclose(vmax, +60.)
def test_percentile(self):
interval = PercentileInterval(62.2)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -4.88)
np.testing.assert_allclose(vmax, 44.88)
def test_asymmetric_percentile(self):
interval = AsymmetricPercentileInterval(10.5, 70.5)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -11.6)
np.testing.assert_allclose(vmax, 36.4)
def test_asymmetric_percentile_nsamples(self):
with NumpyRNGContext(12345):
interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -14.367676767676768)
np.testing.assert_allclose(vmax, 40.266666666666666)
class TestIntervalList(TestInterval):
# Make sure intervals work with lists
data = np.linspace(-20., 60., 100).tolist()
class TestInterval2D(TestInterval):
# Make sure intervals work with 2d arrays
data = np.linspace(-20., 60., 100).reshape(100, 1)
def test_zscale():
np.random.seed(42)
data = np.random.randn(100, 100) * 5 + 10
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, -9.6, atol=0.1)
np.testing.assert_allclose(vmax, 25.4, atol=0.1)
data = list(range(1000)) + [np.nan]
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 999, atol=0.1)
data = list(range(100))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 99, atol=0.1)
def test_zscale_npoints():
"""
Regression test to ensure ZScaleInterval returns the minimum and
maximum of the data if the number of data points is less than
    ``min_npixels``.
"""
data = np.arange(4).reshape((2, 2))
interval = ZScaleInterval(min_npixels=5)
vmin, vmax = interval.get_limits(data)
assert vmin == 0
assert vmax == 3
def test_integers():
# Need to make sure integers get cast to float
interval = MinMaxInterval()
values = interval([1, 3, 4, 5, 6])
np.testing.assert_allclose(values, [0., 0.4, 0.6, 0.8, 1.0])
# Don't accept integer array in output
out = np.zeros(5, dtype=int)
with pytest.raises(TypeError) as exc:
values = interval([1, 3, 4, 5, 6], out=out)
assert exc.value.args[0] == ("Can only do in-place scaling for "
"floating-point arrays")
# But integer input and floating point output is fine
out = np.zeros(5, dtype=float)
interval([1, 3, 4, 5, 6], out=out)
np.testing.assert_allclose(out, [0., 0.4, 0.6, 0.8, 1.0])
def test_constant_data():
"""Test intervals with constant data (avoiding divide-by-zero)."""
shape = (10, 10)
data = np.ones(shape)
interval = MinMaxInterval()
limits = interval.get_limits(data)
values = interval(data)
np.testing.assert_allclose(limits, (1., 1.))
np.testing.assert_allclose(values, np.zeros(shape))
|
3353ddeb85cd530372668bda77af449137fcd765dff6415daa1e5ef2fe003e35 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.visualization.stretch import (LinearStretch, SqrtStretch,
PowerStretch, PowerDistStretch,
InvertedPowerDistStretch,
SquaredStretch, LogStretch,
InvertedLogStretch,
AsinhStretch, SinhStretch,
HistEqStretch,
InvertedHistEqStretch,
ContrastBiasStretch)
DATA = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS = {}
RESULTS[LinearStretch()] = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS[LinearStretch(intercept=0.5) + LinearStretch(slope=0.5)] = \
np.array([0.5, 0.625, 0.75, 0.875, 1.])
RESULTS[SqrtStretch()] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.])
RESULTS[SquaredStretch()] = np.array([0., 0.0625, 0.25, 0.5625, 1.])
RESULTS[PowerStretch(0.5)] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.])
RESULTS[PowerDistStretch()] = np.array([0., 0.004628, 0.030653, 0.177005, 1.])
RESULTS[LogStretch()] = np.array([0., 0.799776, 0.899816, 0.958408, 1.])
RESULTS[AsinhStretch()] = np.array([0., 0.549402, 0.77127, 0.904691, 1.])
RESULTS[SinhStretch()] = np.array([0., 0.082085, 0.212548, 0.46828, 1.])
RESULTS[ContrastBiasStretch(contrast=2., bias=0.4)] = np.array([-0.3, 0.2,
0.7, 1.2,
1.7])
RESULTS[HistEqStretch(DATA)] = DATA
RESULTS[HistEqStretch(DATA[::-1])] = DATA
RESULTS[HistEqStretch(DATA ** 0.5)] = np.array([0., 0.125, 0.25, 0.5674767,
1.])
class TestStretch:
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_no_clip(self, stretch):
np.testing.assert_allclose(stretch(DATA, clip=False),
RESULTS[stretch], atol=1.e-6)
@pytest.mark.parametrize('ndim', [2, 3])
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_clip_ndimensional(self, stretch, ndim):
new_shape = DATA.shape + (1,) * ndim
np.testing.assert_allclose(stretch(DATA.reshape(new_shape),
clip=True).ravel(),
np.clip(RESULTS[stretch], 0., 1),
atol=1.e-6)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_clip(self, stretch):
np.testing.assert_allclose(stretch(DATA, clip=True),
np.clip(RESULTS[stretch], 0., 1),
atol=1.e-6)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_inplace(self, stretch):
data_in = DATA.copy()
result = np.zeros(DATA.shape)
stretch(data_in, out=result, clip=False)
np.testing.assert_allclose(result, RESULTS[stretch], atol=1.e-6)
np.testing.assert_allclose(data_in, DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_round_trip(self, stretch):
np.testing.assert_allclose(stretch.inverse(stretch(DATA, clip=False),
clip=False), DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_inplace_roundtrip(self, stretch):
result = np.zeros(DATA.shape)
stretch(DATA, out=result, clip=False)
stretch.inverse(result, out=result, clip=False)
np.testing.assert_allclose(result, DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_double_inverse(self, stretch):
np.testing.assert_allclose(stretch.inverse.inverse(DATA),
stretch(DATA), atol=1.e-6)
def test_inverted(self):
stretch_1 = SqrtStretch().inverse
stretch_2 = PowerStretch(2)
np.testing.assert_allclose(stretch_1(DATA),
stretch_2(DATA))
def test_chaining(self):
stretch_1 = SqrtStretch() + SqrtStretch()
stretch_2 = PowerStretch(0.25)
stretch_3 = PowerStretch(4.)
np.testing.assert_allclose(stretch_1(DATA),
stretch_2(DATA))
np.testing.assert_allclose(stretch_1.inverse(DATA),
stretch_3(DATA))
def test_clip_invalid():
stretch = SqrtStretch()
values = stretch([-1., 0., 0.5, 1., 1.5])
np.testing.assert_allclose(values, [0., 0., 0.70710678, 1., 1.])
values = stretch([-1., 0., 0.5, 1., 1.5], clip=False)
np.testing.assert_allclose(values, [np.nan, 0., 0.70710678, 1., 1.2247448])
@pytest.mark.parametrize('a', [-2., -1, 1.])
def test_invalid_powerdist_a(a):
match = 'a must be >= 0, but cannot be set to 1'
with pytest.raises(ValueError, match=match):
PowerDistStretch(a=a)
with pytest.raises(ValueError, match=match):
InvertedPowerDistStretch(a=a)
@pytest.mark.parametrize('a', [-2., -1, 0.])
def test_invalid_power_log_a(a):
match = 'a must be > 0'
with pytest.raises(ValueError, match=match):
PowerStretch(a=a)
with pytest.raises(ValueError, match=match):
LogStretch(a=a)
with pytest.raises(ValueError, match=match):
InvertedLogStretch(a=a)
@pytest.mark.parametrize('a', [-2., -1, 0., 1.5])
def test_invalid_sinh_a(a):
match = 'a must be > 0 and <= 1'
with pytest.raises(ValueError, match=match):
AsinhStretch(a=a)
with pytest.raises(ValueError, match=match):
SinhStretch(a=a)
def test_histeqstretch_invalid():
data = np.array([-np.inf, 0.00, 0.25, 0.50, 0.75, 1.00, np.inf])
result = np.array([0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0])
assert_equal(HistEqStretch(data)(data), result)
assert_equal(InvertedHistEqStretch(data)(data), result)
|
1cf3ede9372e751357828de1e16b572bd85cad1b57dfa36592824a97b15a3d35 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_PLT
if HAS_PLT:
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
def teardown_function(function):
plt.close('all')
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_units_errorbar():
pytest.importorskip("matplotlib")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
@pytest.mark.skipif('not HAS_PLT')
def test_quantity_subclass():
"""Check that subclasses are recognized.
This sadly is not done by matplotlib.units itself, though
there is a PR to change it:
https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
@pytest.mark.skipif('not HAS_PLT')
def test_empty_hist():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist([1, 2, 3, 4] * u.mmag, bins=100)
# The second call results in an empty list being passed to the
# unit converter in matplotlib >= 3.1
ax.hist([] * u.mmag, bins=100)
@pytest.mark.skipif('not HAS_PLT')
def test_radian_formatter():
with quantity_support():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3] * u.rad * np.pi)
fig.canvas.draw()
labels = [tl.get_text() for tl in ax.yaxis.get_ticklabels()]
assert labels == ['π/2', 'π', '3π/2', '2π', '5π/2', '3π', '7π/2']
|
a82e3b1edddb65412a22dad90eb888845b5fdb40c469ea58fdecbb87c7678a54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
import sys
import os
import tempfile
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.visualization import lupton_rgb
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB # noqa
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin='lower')
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
def random_array(dtype, N=100):
    return np.array(np.random.random(N)*100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)/3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)//3)
class TestLuptonRgb:
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
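        # (Editor's note: 10**(0.4*dm) converts a magnitude difference dm into
        # a flux ratio, from the definition m1 - m2 = -2.5 * log10(f1 / f2).)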
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v*pow(10, 0.4*ri)
image_g[p[0], p[1]] = v*pow(10, 0.4*gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip('saturation correction is not implemented')
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
saturated_border_width=1, saturated_pixel_value=2000,
filename=temp)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolationolating saturated pixels"""
pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
        self.imagesR = self.imagesR.getImage()
        self.imagesG = self.imagesG.getImage()
        self.imagesB = self.imagesB.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError) as excinfo:
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
assert "shapes must match" in str(excinfo.value)
|
1bb0af079c96d01d1115dfecea0040c8a3f87275ea146d4f152a9c78b4d5c0d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from packaging.version import Version
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.stretch import LogStretch, PowerStretch, SqrtStretch
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB, HAS_PLT # noqa
if HAS_MATPLOTLIB:
import matplotlib
MATPLOTLIB_LT_32 = Version(matplotlib.__version__) < Version('3.2')
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
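# DATA2 spans [0, 2], so a plain linear stretch rescales it to DATA2SCL = DATA2 / 2.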
DATA3 = np.linspace(-3., 3., 7)
STRETCHES = (SqrtStretch(), PowerStretch(0.5), LogStretch())
INVALID = (None, -np.inf, -1)
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2., vmax=10., stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_call_clip(self):
"""Test that the clip keyword is used when calling the object."""
data = np.arange(5)
norm = ImageNormalize(vmin=1., vmax=3., clip=False)
output = norm(data, clip=True)
assert_equal(output.data, [0, 0, 0.5, 1.0, 1.0])
assert np.all(~output.mask)
output = norm(data, clip=False)
assert_equal(output.data, [-0.5, 0, 0.5, 1.0, 1.5])
assert np.all(~output.mask)
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, 'linear', percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.parametrize('stretch', STRETCHES)
def test_invalid_keyword(self, stretch):
norm1 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=None)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False)
norm3 = ImageNormalize(DATA3, stretch=stretch, vmin=-1, vmax=1,
clip=False, invalid=-1.)
result1 = norm1(DATA3)
result2 = norm2(DATA3)
result3 = norm3(DATA3)
assert_equal(result1[0:2], (np.nan, np.nan))
assert_equal(result2[0:2], (-1., -1.))
assert_equal(result1[2:], result2[2:])
assert_equal(result2, result3)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm1 = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm1(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
@pytest.mark.parametrize('invalid', INVALID)
def test_sqrt_invalid_kw(self, invalid):
stretch = SqrtStretch()
norm1 = simple_norm(DATA3, stretch='sqrt', min_cut=-1, max_cut=1,
clip=False, invalid=invalid)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=invalid)
assert_equal(norm1(DATA3), norm2(DATA3))
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
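        # (LogStretch(a) maps x to log10(a*x + 1) / log10(a + 1); the default is a=1000.)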
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
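        # (AsinhStretch(a) maps x to asinh(x/a) / asinh(1/a); the default is a=0.1.)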
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1., clip=True)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99., clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5, clip=True)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_PLT')
def test_imshow_norm():
import matplotlib.pyplot as plt
image = np.random.randn(10, 10)
ax = plt.subplot(label='test_imshow_norm')
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# make sure the pyplot version works
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close('all')
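# Illustrative sketch (editor's addition): imshow_norm() builds an ImageNormalize
# from any interval/stretch keywords and passes it on to imshow(), e.g.
#     im, norm = imshow_norm(image, ax=ax,
#                            interval=PercentileInterval(99.),
#                            stretch=SqrtStretch())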
|
5fb45ab0ea8ebae69105ed0b52dd68934e369b518b9dd50fab491344fa439574 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
pytest.importorskip('matplotlib') # noqa
import matplotlib.pyplot as plt
import matplotlib.dates
from contextlib import nullcontext
from erfa import ErfaWarning
from astropy.time import Time
from astropy.visualization.time import time_support
# Matplotlib 3.3 added a settable epoch for plot dates and changed the default
# from 0000-12-31 to 1970-01-01. This can be checked by the existence of
# get_epoch() in matplotlib.dates.
MPL_EPOCH_1970 = hasattr(matplotlib.dates, 'get_epoch')
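# Illustrative sketch (editor's addition, not used by these tests): on those
# Matplotlib versions the epoch can be inspected or pinned explicitly, e.g.
#     import matplotlib.dates as mdates
#     if hasattr(mdates, 'get_epoch'):
#         mdates.set_epoch('1970-01-01T00:00:00')  # must be set before any dates are plotted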
# Since some of the examples below use times/dates in the future, we use the
# TAI time scale to avoid ERFA warnings about dubious years.
DEFAULT_SCALE = 'tai'
def get_ticklabels(axis):
axis.figure.canvas.draw()
return [x.get_text() for x in axis.get_ticklabels()]
def teardown_function(function):
plt.close('all')
# We first check that we get the expected labels for different time intervals
# for standard ISO formatting. This is a way to check both the locator and
# formatter code.
RANGE_CASES = [
# Interval of many years
(('2014-03-22T12:30:30.9', '2077-03-22T12:30:32.1'),
['2020-01-01',
'2040-01-01',
'2060-01-01']),
# Interval of a few years
(('2014-03-22T12:30:30.9', '2017-03-22T12:30:32.1'),
['2015-01-01',
'2016-01-01',
'2017-01-01']),
# Interval of just under a year
(('2014-03-22T12:30:30.9', '2015-01-22T12:30:32.1'),
['2014-05-01',
'2014-10-01']),
# Interval of a few months
(('2014-11-22T12:30:30.9', '2015-02-22T12:30:32.1'),
['2014-12-01',
'2015-01-01',
'2015-02-01']),
# Interval of just over a month
(('2014-03-22T12:30:30.9', '2014-04-23T12:30:32.1'),
['2014-04-01']),
# Interval of just under a month
(('2014-03-22T12:30:30.9', '2014-04-21T12:30:32.1'),
['2014-03-24',
'2014-04-03',
'2014-04-13']),
# Interval of just over an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:31:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of just under an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:28:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of a few minutes
(('2014-03-22T12:30:30.9', '2014-03-22T12:38:30.9'),
['2014-03-22T12:33:00.000',
'2014-03-22T12:36:00.000']),
# Interval of a few seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:40.9'),
['2014-03-22T12:30:33.000',
'2014-03-22T12:30:36.000',
'2014-03-22T12:30:39.000']),
# Interval of a couple of seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:32.1'),
['2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.500',
'2014-03-22T12:30:32.000']),
# Interval of under a second
(('2014-03-22T12:30:30.89', '2014-03-22T12:30:31.19'),
['2014-03-22T12:30:30.900',
'2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.100']),
]
@pytest.mark.parametrize(('interval', 'expected'), RANGE_CASES)
def test_formatter_locator(interval, expected):
# Check that the ticks and labels returned for the above cases are correct.
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time(interval[0], scale=DEFAULT_SCALE),
Time(interval[1], scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
FORMAT_CASES = [
('byear', ['2020', '2040', '2060']),
('byear_str', ['B2020.000', 'B2040.000', 'B2060.000']),
('cxcsec', ['1000000000', '1500000000', '2000000000', '2500000000']),
('decimalyear', ['2020', '2040', '2060']),
('fits', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('gps', ['1500000000', '2000000000', '2500000000', '3000000000']),
('iso', ['2020-01-01 00:00:00.000', '2040-01-01 00:00:00.000', '2060-01-01 00:00:00.000']),
('isot', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('jd', ['2458000', '2464000', '2470000', '2476000']),
('jyear', ['2020', '2040', '2060']),
('jyear_str', ['J2020.000', 'J2040.000', 'J2060.000']),
('mjd', ['60000', '66000', '72000', '78000']),
('plot_date', (['18000', '24000', '30000', '36000'] if MPL_EPOCH_1970 else
['738000', '744000', '750000', '756000'])),
('unix', ['1500000000', '2000000000', '2500000000', '3000000000']),
('yday', ['2020:001:00:00:00.000', '2040:001:00:00:00.000', '2060:001:00:00:00.000']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_formats(format, expected):
# Check that the locators/formatters work fine for all time formats
with time_support(format=format, simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
        assert ax.get_xlabel() == f'Time ({format})'
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_auto_formats(format, expected):
# Check that the format/scale is taken from the first time used.
with time_support(simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():
ax.set_xlim(Time(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE), format=format),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
        assert ax.get_xlabel() == f'Time ({format})'
FORMAT_CASES_SIMPLIFY = [
('fits', ['2020-01-01', '2040-01-01', '2060-01-01']),
('iso', ['2020-01-01', '2040-01-01', '2060-01-01']),
('isot', ['2020-01-01', '2040-01-01', '2060-01-01']),
('yday', ['2020', '2040', '2060']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES_SIMPLIFY)
def test_formats_simplify(format, expected):
# Check the use of the simplify= option
with time_support(format=format, simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
def test_plot():
# Make sure that plot() works properly
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
ax.plot(Time(['2015-03-22T12:30:30.9',
'2018-03-22T12:30:30.9',
'2021-03-22T12:30:30.9'], scale=DEFAULT_SCALE))
def test_nested():
with time_support(format='iso', simplify=False):
with time_support(format='yday', simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == ['2020', '2040', '2060']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == ['2020-01-01 00:00:00.000',
'2040-01-01 00:00:00.000',
'2060-01-01 00:00:00.000']
|
97361e62c936657cf0b42f4bfc0832d20b123accb7c3f95e48a829afd071bbe3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io import fits
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib.image as mpimg
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestFits2Bitmap:
def setup_class(self):
self.filename = 'test.fits'
self.array = np.arange(16384).reshape((128, 128))
def test_function(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
fits2bitmap(filename)
def test_script(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
def test_exten_num(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', '1'])
def test_exten_name(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
extname = 'SCI'
hdu2 = fits.ImageHDU(self.array)
hdu2.header['EXTNAME'] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', extname])
@pytest.mark.parametrize('file_exten', ['.gz', '.bz2'])
def test_compressed_fits(self, tmpdir, file_exten):
filename = tmpdir.join('test.fits' + file_exten).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
def test_orientation(self, tmpdir):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = tmpdir.join(self.filename).strpath
out_filename = 'fits2bitmap_test.png'
out_filename = tmpdir.join(out_filename).strpath
data = np.zeros((32, 32))
data[0:16, :] = 1.
fits.writeto(filename, data)
main([filename, '-e', '0', '-o', out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
|
f025a90688c00d544695061dc8eb65bd293aa92ad3a4eeaf98aa171aaa5d6f74 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from packaging.version import Version
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcsapi import SlicedLowLevelWCS, HighLevelWCSWrapper
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame, RectangularFrame, RectangularFrame1D)
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.visualization.wcsaxes.transforms import CurvedTransform
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
TEX_UNAVAILABLE = not matplotlib.checkdep_usetex(True)
MATPLOTLIB_DEV = Version(matplotlib.__version__).is_devrelease
def teardown_function(function):
plt.close('all')
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc('axes', grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@pytest.mark.parametrize('grid_type', ['lines', 'contours'])
def test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color='white', grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
# BUT our own catch_warning was ignoring some warnings before, so now we
# have to catch it. Otherwise, the pytest filterwarnings=error
# setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*converting a masked element to nan.*')
warnings.filterwarnings('ignore', message=r'.*No contour levels were found within the data range.*')
warnings.filterwarnings('ignore', message=r'.*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*')
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN will be required.*')
fig.savefig(tmpdir.join('test.png').strpath)
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError) as exc:
ax.get_coords_overlay('banana')
assert exc.value.args[0] == 'Frame banana not found'
with pytest.raises(ValueError) as exc:
get_coord_meta('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header')
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel('Test x label', labelpad=2, color='red')
ax.set_ylabel('Test y label', labelpad=3, color='green')
assert ax.coords[0].axislabels.get_text() == 'Test x label'
assert ax.coords[0].axislabels.get_minpad('b') == 2
assert ax.coords[0].axislabels.get_color() == 'red'
assert ax.coords[1].axislabels.get_text() == 'Test y label'
assert ax.coords[1].axislabels.get_minpad('l') == 3
assert ax.coords[1].axislabels.get_color() == 'green'
assert ax.get_xlabel() == 'Test x label'
assert ax.get_ylabel() == 'Test y label'
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
def test_slicing_warnings(ignore_matplotlibrc, tmpdir):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
plt.savefig(tmpdir.join('test.png').strpath)
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))
plt.savefig(tmpdir.join('test.png').strpath)
def test_plt_xlabel_ylabel(tmpdir):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel('Galactic Longitude')
plt.ylabel('Galactic Latitude')
plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {'type': ('scalar', 'scalar'),
'unit': (u.m, u.s),
'wrap': (None, None),
'name': ('x', 'y')}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type='contours')
fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmpdir.join('test.png').strpath
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
with pytest.warns(UserWarning, match='No contour levels were found within the data range'):
ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))
def test_iterate_coords(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d)
assert exc.value.args[0] == ("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2))
assert exc.value.args[0] == ("'slices' should have as many elements as "
"WCS has pixel dimensions (should be 3)")
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ['x', 'y']
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y'))
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x'))
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y'])
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x'))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ['x']
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y'))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None no
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None yes
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y'))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['FREQ', 'TIME']
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif('TEX_UNAVAILABLE')
def test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc('text', usetex=True)
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---MOL',
'CTYPE2': 'DEC--MOL',
'RADESYS': 'ICRS'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header['NAXIS1'] - 0.5)
ax.set_ylim(-0.5, header['NAXIS2'] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmpdir / 'plot.png')
@pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ['RA', 'Declination']
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---AIT',
'CTYPE2': 'DEC--AIT'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize('atol', [0.2, 1.0e-8])
def test_bbox_size(atol):
    # Test for the size of a WCSAxes bbox (we only support Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail("Exact BoundingBox dimensions are only ensured with FreeType 2.6.1")
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
    ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmpdir):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color='black', grid_type='contours')
fig.savefig(tmpdir / 'plot.png')
fig.savefig(tmpdir / 'plot.png')
|
dbbda9af028d182c9684f8e4d89e4fe4d879ea30c54677a1c0d4ef43da315cd2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.visualization.wcsaxes.utils import select_step_degree, select_step_hour, select_step_scalar
from astropy.tests.helper import (assert_quantity_allclose as
assert_almost_equal_quantity)
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180. * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45. * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3. * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec)
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.), 50000.)
assert_almost_equal(select_step_scalar(433.), 500.)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
|
14057f6030aa7a587109fa2927816141a258e7b237da4e23824a761e32eb5ffd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This sub-package makes use of image testing with the pytest-mpl package:
#
# https://pypi.org/project/pytest-mpl
#
# For more information on writing image tests, see the 'Image tests with
# pytest-mpl' section of the developer docs.
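#
# A minimal sketch of such an image test (illustrative; the test name and plot
# are hypothetical):
#
#     import pytest
#     import matplotlib.pyplot as plt
#
#     @pytest.mark.mpl_image_compare
#     def test_example_plot():
#         fig, ax = plt.subplots()
#         ax.plot([1, 2, 3])
#         return fig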
|