hash:    string (64 characters)
content: string (0 – 1.51M characters)
442cef8841d4d4898ffc07dd96d02a299ceea8c8a6d7fb92fb9a7644d9480269
import numpy as np
import pytest
from numpy.testing import assert_allclose

import astropy.units as u
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (
    fap_single, inv_fap_single, METHODS)
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref  # noqa: E501
from astropy.utils.compat.optional_deps import HAS_SCIPY

METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']


def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0, units=False):
    """Generate some data for testing"""
    rng = np.random.default_rng(rseed)
    t = 5 * period * rng.random(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.random(N))
    y += dy * rng.standard_normal(N)

    fmax = 5

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    else:
        return t, y, dy, fmax


def null_data(N=1000, dy=1, rseed=0, units=False):
    """Generate null hypothesis data"""
    rng = np.random.default_rng(rseed)
    t = 100 * rng.random(N)
    dy = 0.5 * dy * (1 + rng.random(N))
    y = dy * rng.standard_normal(N)

    fmax = 40

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    else:
        return t, y, dy, fmax


@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_distribution(normalization, with_errors, units):
    t, y, dy, fmax = null_data(units=units)

    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    if isinstance(dz, u.Quantity):
        dz = dz.value
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])


@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_inverse_single(N, normalization):
    fap = np.linspace(0, 1, 11)
    z = inv_fap_single(fap, N, normalization)
    fap_out = fap_single(z, N, normalization)
    assert_allclose(fap, fap_out)


@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_inverse_bootstrap(normalization, use_errs, units):
    t, y, dy, fmax = null_data(units=units)
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 11)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)

    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)


@pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'}))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('units', [False, True])
def test_inverses(method, normalization, use_errs, N, units, T=5):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy, fmax = make_data(N, rseed=543, units=units)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    fap = np.logspace(-10, 0, 11)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    assert_allclose(fap, fap_out)


@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_smoketest(method, normalization, units):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(rseed=42, units=units)

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing


@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'}))
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
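# ----------------------------------------------------------------------
# Minimal usage sketch of the false-alarm machinery tested above, with
# synthetic (made-up) data; only public LombScargle methods are used, and
# the 'baluev' method needs SciPy, as the skips above note.
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 10 * rng.random(100)
y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(100)

ls = LombScargle(t, y)
frequency, power = ls.autopower(maximum_frequency=5)
# false-alarm probability of the highest periodogram peak
fap = ls.false_alarm_probability(power.max(), maximum_frequency=5,
                                 method='baluev')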
71683ea31aa82936ea45f401b3d67f1775910904a341044ac19b314f91eb55a8
import numpy as np
import pytest
from numpy.testing import assert_allclose

from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref
from astropy.timeseries.periodograms.lombscargle.core import LombScargle

NORMALIZATIONS = ['standard', 'model', 'log', 'psd']


@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
    """Generate some data for testing"""
    rng = np.random.default_rng(rseed)
    t = 5 * period * rng.random(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.random(N))
    y += dy * rng.standard_normal(N)
    return t, y, dy


@pytest.mark.parametrize('norm_in', NORMALIZATIONS)
@pytest.mark.parametrize('norm_out', NORMALIZATIONS)
def test_convert_normalization(norm_in, norm_out, data):
    t, y, dy = data

    _, power_in = LombScargle(t, y, dy).autopower(maximum_frequency=5,
                                                  normalization=norm_in)
    _, power_out = LombScargle(t, y, dy).autopower(maximum_frequency=5,
                                                   normalization=norm_out)
    power_in_converted = convert_normalization(power_in, N=len(t),
                                               from_normalization=norm_in,
                                               to_normalization=norm_out,
                                               chi2_ref=compute_chi2_ref(y, dy))
    assert_allclose(power_in_converted, power_out)
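# ----------------------------------------------------------------------
# Minimal usage sketch of convert_normalization with synthetic data:
# reuse a 'standard'-normalized periodogram to obtain the 'psd' one
# without recomputing, which is the round trip the test above checks.
import numpy as np
from astropy.timeseries.periodograms.lombscargle.core import LombScargle
from astropy.timeseries.periodograms.lombscargle.utils import (
    convert_normalization, compute_chi2_ref)

rng = np.random.default_rng(1)
t = 5 * rng.random(100)
y = np.sin(2 * np.pi * t) + rng.standard_normal(100)
dy = np.full(100, 0.5)

_, p_std = LombScargle(t, y, dy).autopower(maximum_frequency=5,
                                           normalization='standard')
p_psd = convert_normalization(p_std, N=len(t),
                              from_normalization='standard',
                              to_normalization='psd',
                              chi2_ref=compute_chi2_ref(y, dy))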
54fb41b96b697591c79bb4abaf5169fb3226a13a7d4122f750788666decf5ecf
import pytest
import numpy as np
from numpy.testing import assert_allclose

from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.lombscargle import LombScargle

ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS if 'chi2' in method]
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']


@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
    """Generate some data for testing"""
    rng = np.random.default_rng(rseed)
    t = 20 * period * rng.random(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.random(N))
    y += dy * rng.standard_normal(N)
    return t, y, dy


@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
                       nyquist_factor, samples_per_peak):
    t, y, dy = data
    baseline = t.max() - t.min()

    freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
                                               nyquist_factor,
                                               minimum_frequency,
                                               maximum_frequency)
    df = freq[1] - freq[0]

    # Check sample spacing
    assert_allclose(df, 1. / baseline / samples_per_peak)

    # Check minimum frequency
    if minimum_frequency is None:
        assert_allclose(freq[0], 0.5 * df)
    else:
        assert_allclose(freq[0], minimum_frequency)

    if maximum_frequency is None:
        avg_nyquist = 0.5 * len(t) / baseline
        assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5 * df)
    else:
        assert_allclose(freq[-1], maximum_frequency, atol=0.5 * df)


@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
                     errors, with_units, normalization):
    if method == 'scipy' and (fit_mean or errors != 'none'):
        return

    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if with_units:
        t = t * u.day
        y = y * u.mag
        dy = dy * u.mag
        frequency = frequency / t.unit

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError(f"Unrecognized error type: '{errors}'")

    kwds = {}

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, normalization=normalization)
    P_expected = ls.power(frequency)

    # don't use the fft approximation here; we'll test this elsewhere
    if method in FAST_METHODS:
        kwds['method_kwds'] = dict(use_fft=False)
    P_method = ls.power(frequency, method=method, **kwds)

    if with_units:
        if normalization == 'psd' and errors == 'none':
            assert P_method.unit == y.unit ** 2
        else:
            assert P_method.unit == u.dimensionless_unscaled
    else:
        assert not hasattr(P_method, 'unit')

    assert_quantity_allclose(P_expected, P_method)


@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean,
                        with_errors, normalization):
    if method == 'scipy' and (fit_mean or with_errors):
        return

    t, y, dy = data

    t = np.floor(100 * t)
    t_int = t.astype(int)

    y = np.floor(100 * y)
    y_int = y.astype(int)

    dy = np.floor(100 * dy)
    dy_int = dy.astype('int32')

    frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))

    if not with_errors:
        dy = None
        dy_int = None

    kwds = dict(center_data=center_data, fit_mean=fit_mean,
                normalization=normalization)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency, method=method)
    P_int = LombScargle(t_int, y_int, dy_int, **kwds).power(frequency,
                                                            method=method)
    assert_allclose(P_float, P_int)


@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, errors,
                        nterms, normalization, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError(f"Unrecognized error type: '{errors}'")

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization=normalization)

    if nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, method=method)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)
    else:
        P_expected = ls.power(frequency)

        # don't use fast fft approximations here
        kwds = {}
        if 'fast' in method:
            kwds['method_kwds'] = dict(use_fft=False)
        P_method = ls.power(frequency, method=method, **kwds)

        assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)


@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
                             errors, nterms, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError(f"Unrecognized error type: '{errors}'")

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization='standard')

    # use only standard normalization because we compare via absolute tolerance
    kwds = dict(method=method)

    if method == 'fast' and nterms != 1:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value)

    elif nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)

    else:
        P_fast = ls.power(frequency, **kwds)
        kwds['method_kwds'] = dict(use_fft=False)
        P_slow = ls.power(frequency, **kwds)

        assert_allclose(P_fast, P_slow, atol=0.008)


@pytest.mark.parametrize('method', LombScargle.available_methods)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
    t, y, dy = data
    freq = np.asarray(np.zeros(shape))
    freq.flat = np.arange(1, freq.size + 1)
    PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
    assert PLS.shape == shape


@pytest.mark.parametrize('method', LombScargle.available_methods)
def test_errors_on_unit_mismatch(method, data):
    t, y, dy = data

    t = t * u.second
    y = y * u.mag
    frequency = np.linspace(0.5, 1.5, 10)

    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, fit_mean=False).power(frequency, method=method)
    assert str(err.value).startswith('Units of frequency not equivalent')

    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
    assert str(err.value).startswith('Units of dy not equivalent')


# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize('with_error', [True, False])
def test_unit_conversions(data, with_error):
    t, y, dy = data

    t_day = t * u.day
    t_hour = u.Quantity(t_day, 'hour')

    y_meter = y * u.meter
    y_millimeter = u.Quantity(y_meter, 'millimeter')

    # sanity check on inputs
    assert_quantity_allclose(t_day, t_hour)
    assert_quantity_allclose(y_meter, y_millimeter)

    if with_error:
        dy = dy * u.meter
    else:
        dy = None

    freq_day, P1 = LombScargle(t_day, y_meter, dy).autopower()
    freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy).autopower()

    # Check units of frequency
    assert freq_day.unit == 1. / u.day
    assert freq_hour.unit == 1. / u.hour

    # Check that results match
    assert_quantity_allclose(freq_day, freq_hour)
    assert_quantity_allclose(P1, P2)

    # Check that switching frequency units doesn't change things
    P3 = LombScargle(t_day, y_meter, dy).power(freq_hour)
    P4 = LombScargle(t_hour, y_meter, dy).power(freq_day)
    assert_quantity_allclose(P3, P4)


@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
    rand = np.random.default_rng(0)
    t = 10 * rand.random(40)
    params = 10 * rand.random(3)

    y = np.zeros_like(t)
    if fit_mean:
        y += params[0]
    y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))

    if with_units:
        t = t * u.day
        y = y * u.mag
        freq = freq / u.day

    ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
    y_fit = ls.model(t, freq)
    assert_quantity_allclose(y_fit, y)


@pytest.mark.parametrize('t_unit', [u.second, u.day])
@pytest.mark.parametrize('frequency_unit', [u.Hz, 1. / u.second])
@pytest.mark.parametrize('y_unit', [u.mag, u.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
    t, y, dy = data
    t_fit = t[:5]
    frequency = 1.0

    t = t * t_unit
    t_fit = t_fit * t_unit
    y = y * y_unit
    dy = dy * y_unit
    frequency = frequency * frequency_unit

    ls = LombScargle(t, y, dy)
    y_fit = ls.model(t_fit, frequency)
    assert y_fit.unit == y_unit


def test_model_units_mismatch(data):
    t, y, dy = data
    frequency = 1.0
    t_fit = t[:5]

    t = t * u.second
    t_fit = t_fit * u.second
    y = y * u.mag
    frequency = 1.0 / t.unit

    # this should fail because frequency and 1/t units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model(t_fit, frequency=1.0)
    assert str(err.value).startswith('Units of frequency not equivalent')

    # this should fail because t and t_fit units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y).model([1, 2], frequency)
    assert str(err.value).startswith('Units of t not equivalent')

    # this should fail because dy and y units do not match
    with pytest.raises(ValueError) as err:
        LombScargle(t, y, dy).model(t_fit, frequency)
    assert str(err.value).startswith('Units of dy not equivalent')


def test_autopower(data):
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2, maximum_frequency=None)
    freq1 = ls.autofrequency(**kwargs)
    power1 = ls.power(freq1)
    freq2, power2 = ls.autopower(**kwargs)

    assert_allclose(freq1, freq2)
    assert_allclose(power1, power2)


@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_model_parameters(data, nterms, fit_mean, center_data,
                          errors, with_units):
    if nterms == 0 and not fit_mean:
        return

    t, y, dy = data
    frequency = 1.5
    if with_units:
        t = t * u.day
        y = y * u.mag
        dy = dy * u.mag
        frequency = frequency / t.unit

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError(f"Unrecognized error type: '{errors}'")

    ls = LombScargle(t, y, dy, nterms=nterms,
                     fit_mean=fit_mean, center_data=center_data)
    tfit = np.linspace(0, 20, 10)
    if with_units:
        tfit = tfit * u.day

    model = ls.model(tfit, frequency)
    params = ls.model_parameters(frequency)
    design = ls.design_matrix(frequency, t=tfit)
    offset = ls.offset()

    assert len(params) == int(fit_mean) + 2 * nterms

    assert_quantity_allclose(offset + design.dot(params), model)


@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):

    # Make sure that we handle absolute times correctly. We also check that
    # TimeDelta works properly when timedelta is True.

    # The example data uses relative times
    t, y, dy = data

    # FIXME: There seems to be a numerical stability issue in that if we run
    # the algorithm with the same values but offset in time, the transit_time
    # is not offset by a fixed amount. To avoid this issue in this test, we
    # make sure the first time is also the smallest so that internally the
    # values of the relative time should be the same.
    t[0] = 0.

    # Add units
    t = t * u.day
    y = y * u.mag
    dy = dy * u.mag

    # We now construct a set of absolute times but keeping the rest the same
    start = Time('2019-05-04T12:34:56')
    trel = TimeDelta(t) if timedelta else t
    t = trel + start

    # and we set up two instances of LombScargle, one with absolute and one
    # with relative times.
    ls1 = LombScargle(t, y, dy)
    ls2 = LombScargle(trel, y, dy)

    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2 / u.day, maximum_frequency=None)

    freq1 = ls1.autofrequency(**kwargs)
    freq2 = ls2.autofrequency(**kwargs)
    assert_quantity_allclose(freq1, freq2)

    power1 = ls1.power(freq1)
    power2 = ls2.power(freq2)
    assert_quantity_allclose(power1, power2)

    freq1, power1 = ls1.autopower(**kwargs)
    freq2, power2 = ls2.autopower(**kwargs)
    assert_quantity_allclose(freq1, freq2)
    assert_quantity_allclose(power1, power2)

    model1 = ls1.model(t, 2 / u.day)
    model2 = ls2.model(trel, 2 / u.day)
    assert_quantity_allclose(model1, model2)

    # Check model validation

    with pytest.raises(TypeError) as exc:
        ls1.model(trel, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.model(t, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')

    # Check design matrix

    design1 = ls1.design_matrix(2 / u.day, t=t)
    design2 = ls2.design_matrix(2 / u.day, t=trel)
    assert_quantity_allclose(design1, design2)

    # Check design matrix validation

    with pytest.raises(TypeError) as exc:
        ls1.design_matrix(2 / u.day, t=trel)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.design_matrix(2 / u.day, t=t)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')
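# ----------------------------------------------------------------------
# Minimal workflow sketch (synthetic data) combining the pieces tested
# above: locate the periodogram peak, then evaluate the best-fit model.
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(2)
t = 20 * rng.random(100)
y = 3 + 2 * np.sin(2 * np.pi * t) + 0.3 * rng.standard_normal(100)

ls = LombScargle(t, y)
frequency, power = ls.autopower()
best_frequency = frequency[np.argmax(power)]

t_fit = np.linspace(t.min(), t.max(), 500)
y_fit = ls.model(t_fit, best_frequency)      # best-fit sinusoid
theta = ls.model_parameters(best_frequency)  # amplitudes checked above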
0dc25c34e5cb7909ea1c3b6bc8964db019e4370d531671a42886f126888f4253
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal

from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum


@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
    assert_equal(bitceil(N + offset),
                 int(2 ** np.ceil(np.log2(N + offset))))


@pytest.fixture
def extirpolate_data():
    rng = np.random.default_rng(0)
    x = 100 * rng.random(50)
    y = np.sin(x)
    f = lambda x: np.sin(x / 10)
    return x, y, f


@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
    x, y, f = extirpolate_data
    y_hat = extirpolate(x, y, N, M)
    x_hat = np.arange(len(y_hat))
    assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.5e-5)


@pytest.fixture
def extirpolate_int_data():
    rng = np.random.default_rng(0)
    x = 100 * rng.random(50)
    x[:25] = x[:25].astype(int)
    y = np.sin(x)
    f = lambda x: np.sin(x / 10)
    return x, y, f


@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
    x, y, f = extirpolate_int_data
    y_hat = extirpolate(x, y, N, M)
    x_hat = np.arange(len(y_hat))
    assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.7e-5)


@pytest.fixture
def trig_sum_data():
    rng = np.random.default_rng(0)
    t = 10 * rng.random(50)
    h = np.sin(t)
    return t, h


@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
    t, h = trig_sum_data
    tfit = t - t.min() if adjust_t else t
    S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
                      f0=f0, freq_factor=freq_factor, oversampling=10)
    S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
                      f0=f0, freq_factor=freq_factor, oversampling=10)
    assert_allclose(S1, S2, atol=1E-2)
    assert_allclose(C1, C2, atol=1E-2)
dc26c96cb953c53ea121c45db27ebc03838eb56696fe43605cca89c450abbb18
import pytest
import numpy as np
from numpy.testing import assert_allclose

from astropy.timeseries.periodograms.lombscargle.implementations.mle import design_matrix, periodic_fit


@pytest.fixture
def t():
    rand = np.random.default_rng(42)
    return 10 * rand.random(10)


@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
    X = design_matrix(t, freq, dy, bias=bias)
    assert X.shape == (t.shape[0], 2 + bool(bias))
    if bias:
        assert_allclose(X[:, 0], 1. / (dy or 1.0))
    assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
    assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))


@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
    dy = 2.0
    freq = 1.5
    X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
    assert X.shape == (t.shape[0], 1 + 2 * nterms)
    assert_allclose(X[:, 0], 1. / dy)
    for i in range(1, nterms + 1):
        assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
        assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)


@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
    rand = np.random.default_rng(42)
    t = 10 * rand.random(30)
    theta = -1 + rand.random(2 * nterms + 1)
    y = np.zeros(t.shape)

    if fit_mean:
        y = theta[0] * np.ones(t.shape)
    for i in range(1, nterms + 1):
        y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
        y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)

    y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
                         center_data=False, fit_mean=fit_mean)
    assert_allclose(y, y_fit)
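# ----------------------------------------------------------------------
# Sketch of the shape convention asserted above (synthetic inputs):
# design_matrix() stacks [1/dy, sin/dy, cos/dy, ...] columns, which
# periodic_fit() solves by weighted least squares.
import numpy as np
from astropy.timeseries.periodograms.lombscargle.implementations.mle import (
    design_matrix, periodic_fit)

t = np.linspace(0, 9, 30)
y = 1.0 + np.sin(2 * np.pi * 1.5 * t)

X = design_matrix(t, 1.5, 1.0, bias=True, nterms=1)
assert X.shape == (30, 3)  # bias column + sin + cos

y_fit = periodic_fit(t, y, dy=1, frequency=1.5, t_fit=t, nterms=1,
                     center_data=False, fit_mean=True)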
68ce65092ae845e38aaf9d5b03c79c4d695af00e61f6d1a0e17bf7eb1a48c404
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from unittest import mock

import pytest

from astropy.io.fits import HDUList, Header, PrimaryHDU, BinTableHDU
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.io.kepler import kepler_fits_reader


def fake_header(extver, version, timesys, telescop):
    return Header({"SIMPLE": "T",
                   "BITPIX": 8,
                   "NAXIS": 0,
                   "EXTVER": extver,
                   "VERSION": version,
                   'TIMESYS': f"{timesys}",
                   "TELESCOP": f"{telescop}"})


def fake_hdulist(extver=1, version=2, timesys="TDB", telescop="KEPLER"):
    new_header = fake_header(extver, version, timesys, telescop)
    return [HDUList(hdus=[PrimaryHDU(header=new_header),
                          BinTableHDU(header=new_header, name="LIGHTCURVE")])]


@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(telescop="MadeUp"))
def test_raise_telescop_wrong(mock_file):
    with pytest.raises(NotImplementedError) as exc:
        kepler_fits_reader(None)
    assert exc.value.args[0] == ("MadeUp is not implemented, only KEPLER or TESS are "
                                 "supported through this reader")


@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2))
def test_raise_extversion_kepler(mock_file):
    with pytest.raises(NotImplementedError) as exc:
        kepler_fits_reader(None)
    assert exc.value.args[0] == ("Support for KEPLER v2 files not yet "
                                 "implemented")


@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2, telescop="TESS"))
def test_raise_extversion_tess(mock_file):
    with pytest.raises(NotImplementedError) as exc:
        kepler_fits_reader(None)
    assert exc.value.args[0] == ("Support for TESS v2 files not yet "
                                 "implemented")


@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB"))
def test_raise_timesys_kepler(mock_file):
    with pytest.raises(NotImplementedError) as exc:
        kepler_fits_reader(None)
    assert exc.value.args[0] == ("Support for TCB time scale not yet "
                                 "implemented in KEPLER reader")


@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB", telescop="TESS"))
def test_raise_timesys_tess(mock_file):
    with pytest.raises(NotImplementedError) as exc:
        kepler_fits_reader(None)
    assert exc.value.args[0] == ("Support for TCB time scale not yet "
                                 "implemented in TESS reader")


@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
    from astropy.units import UnitsWarning

    filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')

    with pytest.warns(UnitsWarning):
        timeseries = kepler_fits_reader(filename)

    assert timeseries["time"].format == 'isot'
    assert timeseries["time"].scale == 'tdb'
    assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
    assert len(timeseries) == 14280
    assert len(timeseries.columns) == 20


@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
    filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
    with pytest.warns(UserWarning, match='Ignoring 815 rows with NaN times'):
        timeseries = kepler_fits_reader(filename)
    assert timeseries["time"].format == 'isot'
    assert timeseries["time"].scale == 'tdb'
    assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
    assert len(timeseries) == 19261
    assert len(timeseries.columns) == 20
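# ----------------------------------------------------------------------
# Usage sketch: reading a Kepler/TESS light curve. The path below is a
# placeholder for a local copy of a file like the remote-data one above.
from astropy.timeseries.io.kepler import kepler_fits_reader

ts = kepler_fits_reader('kplr010666592-2009131110544_slc.fits')
print(ts['time'].scale)     # 'tdb', per the assertions above
print(ts['sap_flux'].unit)  # electron / s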
91b64d684e89238d2d9277ebb30f6ff1f3ccd2614f81710519dd50c776246a73
import pytest

import astropy.constants as const
from astropy import astronomical_constants, physical_constants


def test_version_match():
    pversion = physical_constants.get()
    refpversion = const.h.__class__.__name__.lower()
    assert pversion == refpversion
    aversion = astronomical_constants.get()
    refaversion = const.M_sun.__class__.__name__.lower()
    assert aversion == refaversion


def test_previously_imported():
    with pytest.raises(RuntimeError):
        physical_constants.set('codata2018')

    with pytest.raises(RuntimeError):
        astronomical_constants.set('iau2015')
78447a7d4d0a8554a6d876e85f1ff7564960fef10caf956e742c02b33e6c250a
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from astropy import constants as const
from astropy.tests.helper import check_pickling_recovery, pickle_protocol  # noqa

originals = [const.Constant('h_fake', 'Not Planck', 0.0, 'J s', 0.0,
                            'fakeref', system='si'),
             const.h,
             const.e.si]


@pytest.mark.parametrize("original", originals)
def test_new_constant(pickle_protocol, original):  # noqa
    check_pickling_recovery(original, pickle_protocol)
fe4409d88a7c1bdbca9e2995ecbc16b8ce22b504b9da462010fc51429d73e0f4
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import copy

import pytest
import numpy as np

from astropy.constants import Constant
from astropy.units import Quantity as Q


def test_c():
    from astropy.constants.codata2010 import c

    # c is an exactly defined constant, so it shouldn't be changing
    assert c.value == 2.99792458e8  # default is S.I.
    assert c.si.value == 2.99792458e8
    assert c.cgs.value == 2.99792458e10

    # make sure it has the necessary attributes and they're not blank
    assert c.uncertainty == 0  # c is a *defined* quantity
    assert c.name
    assert c.reference
    assert c.unit


def test_h():
    from astropy.constants import h as h_current
    from astropy.constants.codata2010 import h

    # check that the value is the CODATA2010 value
    assert abs(h.value - 6.62606957e-34) < 1e-43
    assert abs(h.si.value - 6.62606957e-34) < 1e-43
    assert abs(h.cgs.value - 6.62606957e-27) < 1e-36

    # Check it is different than the current value
    assert abs(h.value - h_current.value) > 4e-42

    # make sure it has the necessary attributes and they're not blank
    assert h.uncertainty
    assert h.name
    assert h.reference
    assert h.unit


def test_e():
    from astropy.constants.astropyconst13 import e as e_13

    # A test quantity
    E = Q(100.00000348276221, 'V/m')

    # e.cgs is too ambiguous and should not work at all
    with pytest.raises(TypeError):
        e_13.cgs * E

    assert isinstance(e_13.si, Q)
    assert isinstance(e_13.gauss, Q)
    assert isinstance(e_13.esu, Q)

    assert e_13.gauss * E == Q(e_13.gauss.value * E.value, 'Fr V/m')
    assert e_13.esu * E == Q(e_13.esu.value * E.value, 'Fr V/m')


def test_g0():
    """Tests for #1263 demonstrating how g0 constant should behave."""
    from astropy.constants.astropyconst13 import g0

    # g0 is an exactly defined constant, so it shouldn't be changing
    assert g0.value == 9.80665  # default is S.I.
    assert g0.si.value == 9.80665
    assert g0.cgs.value == 9.80665e2

    # make sure it has the necessary attributes and they're not blank
    assert g0.uncertainty == 0  # g0 is a *defined* quantity
    assert g0.name
    assert g0.reference
    assert g0.unit

    # Check that its unit has the correct physical type
    assert g0.unit.physical_type == 'acceleration'


def test_b_wien():
    """b_wien should give the correct peak wavelength for given blackbody
    temperature. The Sun is used in this test.
    """
    from astropy import units as u
    from astropy.constants.astropyconst13 import b_wien

    t = 5778 * u.K
    w = (b_wien / t).to(u.nm)
    assert round(w.value) == 502


def test_pc():
    """Parsec is defined to use small-angle limit per IAU 2015 Resolution B 2.
    iau2012 version still uses tan(parallax).
    """
    from astropy import units as u
    from astropy.constants import iau2012

    plx = np.radians(1 / 3600)
    assert np.allclose(u.pc.to('m') / iau2012.pc.si.value,
                       np.tan(plx) / plx, rtol=1.e-14, atol=0)


def test_masses():
    """Ensure mass values are set up correctly.
    https://github.com/astropy/astropy/issues/8920
    """
    from astropy.constants import astropyconst13, astropyconst20, astropyconst40

    ref_text = "Allen's Astrophysical Quantities 4th Ed."
    assert (astropyconst13.M_sun.reference == ref_text
            and astropyconst13.M_jup.reference == ref_text
            and astropyconst13.M_earth.reference == ref_text)

    ref_text = "IAU 2015 Resolution B 3 + CODATA 2014"
    assert (astropyconst20.M_sun.reference == ref_text
            and astropyconst20.M_jup.reference == ref_text
            and astropyconst20.M_earth.reference == ref_text)

    ref_text = "IAU 2015 Resolution B 3 + CODATA 2018"
    assert (astropyconst40.M_sun.reference == ref_text
            and astropyconst40.M_jup.reference == ref_text
            and astropyconst40.M_earth.reference == ref_text)


def test_unit():
    from astropy import units as u
    from astropy.constants import astropyconst13 as const

    for key, val in vars(const).items():
        if isinstance(val, Constant):
            # Getting the unit forces the unit parser to run. Confirm
            # that none of the constants defined in astropy have an
            # invalid unit.
            assert not isinstance(val.unit, u.UnrecognizedUnit)


def test_copy():
    from astropy import constants as const
    cc = copy.deepcopy(const.c)
    assert cc == const.c

    cc = copy.copy(const.c)
    assert cc == const.c


def test_view():
    """Check that Constant and Quantity views can be taken (#3537, #3538)."""
    from astropy.constants import c

    c2 = c.view(Constant)
    assert c2 == c
    assert c2.value == c.value
    # make sure it has the necessary attributes and they're not blank
    assert c2.uncertainty == 0  # c is a *defined* quantity
    assert c2.name == c.name
    assert c2.reference == c.reference
    assert c2.unit == c.unit

    q1 = c.view(Q)
    assert q1 == c
    assert q1.value == c.value
    assert type(q1) is Q
    assert not hasattr(q1, 'reference')

    q2 = Q(c)
    assert q2 == c
    assert q2.value == c.value
    assert type(q2) is Q
    assert not hasattr(q2, 'reference')

    c3 = Q(c, subok=True)
    assert c3 == c
    assert c3.value == c.value
    # make sure it has the necessary attributes and they're not blank
    assert c3.uncertainty == 0  # c is a *defined* quantity
    assert c3.name == c.name
    assert c3.reference == c.reference
    assert c3.unit == c.unit

    c4 = Q(c, subok=True, copy=False)
    assert c4 is c
8c8259efa0f8b56f16f1dafb6d1ab8a2837fa0c97a12608ffb0b563ac7f38242
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import copy

import pytest

from astropy.constants import Constant
from astropy.units import Quantity as Q


def test_c():
    from astropy.constants import c

    # c is an exactly defined constant, so it shouldn't be changing
    assert c.value == 2.99792458e8  # default is S.I.
    assert c.si.value == 2.99792458e8
    assert c.cgs.value == 2.99792458e10

    # make sure it has the necessary attributes and they're not blank
    assert c.uncertainty == 0  # c is a *defined* quantity
    assert c.name
    assert c.reference
    assert c.unit


def test_h():
    from astropy.constants import h

    # check that the value is fairly close to what it should be (not exactly
    # checking because this might get updated in the future)
    assert abs(h.value - 6.626e-34) < 1e-38
    assert abs(h.si.value - 6.626e-34) < 1e-38
    assert abs(h.cgs.value - 6.626e-27) < 1e-31

    # make sure it has the necessary attributes and they're not blank
    assert h.uncertainty == 0  # CODATA 2018 set h to exact value
    assert h.name
    assert h.reference
    assert h.unit


def test_e():
    """Tests for #572 demonstrating how EM constants should behave."""
    from astropy.constants import e

    # A test quantity
    E = Q(100, 'V/m')

    # Without specifying a system e should not combine with other quantities
    pytest.raises(TypeError, lambda: e * E)
    # Try it again (as regression test on a minor issue mentioned in #745
    # where repeated attempts to use e in an expression resulted in
    # UnboundLocalError instead of TypeError)
    pytest.raises(TypeError, lambda: e * E)

    # e.cgs is too ambiguous and should not work at all
    pytest.raises(TypeError, lambda: e.cgs * E)

    assert isinstance(e.si, Q)
    assert isinstance(e.gauss, Q)
    assert isinstance(e.esu, Q)

    assert e.si * E == Q(100, 'eV/m')
    assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
    assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')


def test_g0():
    """Tests for #1263 demonstrating how g0 constant should behave."""
    from astropy.constants import g0

    # g0 is an exactly defined constant, so it shouldn't be changing
    assert g0.value == 9.80665  # default is S.I.
    assert g0.si.value == 9.80665
    assert g0.cgs.value == 9.80665e2

    # make sure it has the necessary attributes and they're not blank
    assert g0.uncertainty == 0  # g0 is a *defined* quantity
    assert g0.name
    assert g0.reference
    assert g0.unit

    # Check that its unit has the correct physical type
    assert g0.unit.physical_type == 'acceleration'


def test_b_wien():
    """b_wien should give the correct peak wavelength for given blackbody
    temperature. The Sun is used in this test.
    """
    from astropy import units as u
    from astropy.constants import b_wien

    t = 5778 * u.K
    w = (b_wien / t).to(u.nm)
    assert round(w.value) == 502


def test_unit():
    from astropy import constants as const
    from astropy import units as u

    for key, val in vars(const).items():
        if isinstance(val, Constant):
            # Getting the unit forces the unit parser to run. Confirm
            # that none of the constants defined in astropy have an
            # invalid unit.
            assert not isinstance(val.unit, u.UnrecognizedUnit)


def test_copy():
    from astropy import constants as const
    cc = copy.deepcopy(const.c)
    assert cc == const.c

    cc = copy.copy(const.c)
    assert cc == const.c


def test_view():
    """Check that Constant and Quantity views can be taken (#3537, #3538)."""
    from astropy.constants import c

    c2 = c.view(Constant)
    assert c2 == c
    assert c2.value == c.value
    # make sure it has the necessary attributes and they're not blank
    assert c2.uncertainty == 0  # c is a *defined* quantity
    assert c2.name == c.name
    assert c2.reference == c.reference
    assert c2.unit == c.unit

    q1 = c.view(Q)
    assert q1 == c
    assert q1.value == c.value
    assert type(q1) is Q
    assert not hasattr(q1, 'reference')

    q2 = Q(c)
    assert q2 == c
    assert q2.value == c.value
    assert type(q2) is Q
    assert not hasattr(q2, 'reference')

    c3 = Q(c, subok=True)
    assert c3 == c
    assert c3.value == c.value
    # make sure it has the necessary attributes and they're not blank
    assert c3.uncertainty == 0  # c is a *defined* quantity
    assert c3.name == c.name
    assert c3.reference == c.reference
    assert c3.unit == c.unit

    c4 = Q(c, subok=True, copy=False)
    assert c4 is c
d5babde091b0ba629fa9b46cff42ea5b987dbd4ed63ae6e762dd30c03d528ea2
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
The following are private functions, included here **FOR REFERENCE ONLY**
since the io registry cannot be displayed. These functions are registered
into :meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
"""  # this is shown in the docs.

import abc
import copy
import inspect

import numpy as np

from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.modeling import FittableModel, Model
from astropy.modeling import Parameter as ModelParameter
from astropy.utils.decorators import classproperty

from .utils import convert_parameter_to_model_parameter

__all__ = []  # nothing is publicly scoped


class _CosmologyModel(FittableModel):
    """Base class for Cosmology redshift-method Models.

    .. note::

        This class is not publicly scoped so should not be used directly.
        Instead, from a Cosmology instance use ``.to_format("astropy.model")``
        to create an instance of a subclass of this class.

    `_CosmologyModel` (subclasses) wrap a redshift-method of a
    :class:`~astropy.cosmology.Cosmology` class, converting each non-`None`
    |Cosmology| :class:`~astropy.cosmology.Parameter` to a
    :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
    and the redshift-method to the model's ``__call__ / evaluate``.

    See Also
    --------
    astropy.cosmology.Cosmology.to_format
    """

    @abc.abstractmethod
    def _cosmology_class(self):
        """Cosmology class as a private attribute. Set in subclasses."""

    @abc.abstractmethod
    def _method_name(self):
        """Cosmology method name as a private attribute. Set in subclasses."""

    @classproperty
    def cosmology_class(cls):
        """|Cosmology| class."""
        return cls._cosmology_class

    @property
    def cosmology(self):
        """Return |Cosmology| using `~astropy.modeling.Parameter` values."""
        cosmo = self._cosmology_class(
            name=self.name,
            **{k: (v.value if not (v := getattr(self, k)).unit else v.quantity)
               for k in self.param_names})
        return cosmo

    @classproperty
    def method_name(self):
        """Redshift-method name on |Cosmology| instance."""
        return self._method_name

    # ---------------------------------------------------------------

    def evaluate(self, *args, **kwargs):
        """Evaluate method {method!r} of {cosmo_cls!r} Cosmology.

        The Model wraps the :class:`~astropy.cosmology.Cosmology` method,
        converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a
        :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
        (unless the Parameter is None, in which case it is skipped).
        Here an instance of the cosmology is created using the current
        Parameter values and the method is evaluated given the input.

        Parameters
        ----------
        *args, **kwargs
            The first ``n_inputs`` of ``*args`` are for evaluating the method
            of the cosmology. The remaining args and kwargs are passed to the
            cosmology class constructor.
            Any unspecified Cosmology Parameter use the current value of the
            corresponding Model Parameter.

        Returns
        -------
        Any
            Results of evaluating the Cosmology method.
        """
        # create BoundArgument with all available inputs beyond the
        # Parameters, which will be filled in next
        ba = self.cosmology_class._init_signature.bind_partial(
            *args[self.n_inputs:], **kwargs)

        # fill in missing Parameters
        for k in self.param_names:
            if k not in ba.arguments:
                v = getattr(self, k)
                ba.arguments[k] = v.value if not v.unit else v.quantity

            # unvectorize, since Cosmology is not vectorized
            # TODO! remove when vectorized
            if np.shape(ba.arguments[k]):  # only in __call__
                # m_nu is a special case
                # TODO! fix by making it 'structured'
                if k == "m_nu" and len(ba.arguments[k].shape) == 1:
                    continue
                ba.arguments[k] = ba.arguments[k][0]

        # make instance of cosmology
        cosmo = self._cosmology_class(**ba.arguments)
        # evaluate method
        result = getattr(cosmo, self._method_name)(*args[:self.n_inputs])

        return result


##############################################################################


def from_model(model):
    """Load |Cosmology| from `~astropy.modeling.Model` object.

    Parameters
    ----------
    model : `_CosmologyModel` subclass instance
        See ``Cosmology.to_format.help("astropy.model")`` for details.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance

    Examples
    --------
    >>> from astropy.cosmology import Cosmology, Planck18
    >>> model = Planck18.to_format("astropy.model", method="lookback_time")
    >>> Cosmology.from_format(model)
    FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
                  Tcmb0=2.7255 K, Neff=3.046, m_nu=[0.   0.   0.06] eV, Ob0=0.04897)
    """
    cosmology = model.cosmology_class
    meta = copy.deepcopy(model.meta)

    # assemble the Parameters
    params = {}
    for n in model.param_names:
        p = getattr(model, n)
        params[p.name] = p.quantity if p.unit else p.value
        # put all attributes in a dict
        meta[p.name] = {n: getattr(p, n) for n in dir(p)
                        if not (n.startswith("_") or callable(getattr(p, n)))}

    ba = cosmology._init_signature.bind(name=model.name, **params, meta=meta)
    return cosmology(*ba.args, **ba.kwargs)


def to_model(cosmology, *_, method):
    """Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology` subclass instance
    method : str, keyword-only
        The name of the method on the ``cosmology``.

    Returns
    -------
    `_CosmologyModel` subclass instance
        The Model wraps the |Cosmology| method, converting each non-`None`
        :class:`~astropy.cosmology.Parameter` to a
        :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
        and the method to the model's ``__call__ / evaluate``.

    Examples
    --------
    >>> from astropy.cosmology import Planck18
    >>> model = Planck18.to_format("astropy.model", method="lookback_time")
    >>> model
    <FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0.  , 0.  , 0.06] eV, Ob0=0.04897, name='Planck18')>
    """
    cosmo_cls = cosmology.__class__

    # get bound method & sig from cosmology (unbound if class).
    if not hasattr(cosmology, method):
        raise AttributeError(f"{method} is not a method on {cosmology.__class__}.")
    func = getattr(cosmology, method)
    if not callable(func):
        raise ValueError(f"{cosmology.__class__}.{method} is not callable.")
    msig = inspect.signature(func)

    # introspect for number of positional inputs, ignoring "self"
    n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))])

    attrs = {}  # class attributes
    attrs["_cosmology_class"] = cosmo_cls
    attrs["_method_name"] = method
    attrs["n_inputs"] = n_inputs
    attrs["n_outputs"] = 1

    params = {}  # Parameters (also class attributes)
    for n in cosmology.__parameters__:
        v = getattr(cosmology, n)  # parameter value

        if v is None:  # skip unspecified parameters
            continue

        # add as Model Parameter
        params[n] = convert_parameter_to_model_parameter(getattr(cosmo_cls, n), v,
                                                         cosmology.meta.get(n))

    # class name is cosmology name + Cosmology + method name + Model
    clsname = (cosmo_cls.__qualname__.replace(".", "_")
               + "Cosmology"
               + method.replace("_", " ").title().replace(" ", "")
               + "Model")

    # make Model class
    CosmoModel = type(clsname, (_CosmologyModel, ), {**attrs, **params})
    # override __signature__ and format the doc.
    setattr(CosmoModel.evaluate, "__signature__", msig)
    CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format(
        cosmo_cls=cosmo_cls.__qualname__, method=method)

    # instantiate class using default values
    ps = {n: getattr(cosmology, n) for n in params.keys()}
    model = CosmoModel(**ps, name=cosmology.name,
                       meta=copy.deepcopy(cosmology.meta))

    return model


def model_identify(origin, format, *args, **kwargs):
    """Identify if object uses the :class:`~astropy.modeling.Model` format.

    Returns
    -------
    bool
    """
    itis = False
    if origin == "read":
        itis = isinstance(args[1], Model) and (format in (None, "astropy.model"))

    return itis


# ===================================================================
# Register

convert_registry.register_reader("astropy.model", Cosmology, from_model)
convert_registry.register_writer("astropy.model", Cosmology, to_model)
convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
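# ----------------------------------------------------------------------
# Round-trip sketch for the Model format registered above, mirroring the
# doctests in from_model/to_model.
from astropy.cosmology import Cosmology, Planck18

model = Planck18.to_format("astropy.model", method="lookback_time")
lt = model(1.0)  # evaluates Planck18.lookback_time at z=1 via evaluate()
cosmo = Cosmology.from_format(model)  # FlatLambdaCDM(name="Planck18", ...)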
abdb2924228a28210b64bbc1897b9e092a8b115374200a26821988d6263999e1
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import copy
from collections import defaultdict

import numpy as np

from astropy.table import Row, QTable
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology

from .mapping import from_mapping


def from_row(row, *, move_to_meta=False, cosmology=None):
    """Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.

    Parameters
    ----------
    row : `~astropy.table.Row`
        The object containing the Cosmology information.
    move_to_meta : bool (optional, keyword-only)
        Whether to move keyword arguments that are not in the Cosmology class'
        signature to the Cosmology's metadata. This will only be applied if
        the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
        Arguments moved to the metadata will be merged with existing metadata,
        preferring specified metadata in the case of a merge conflict
        (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
        will be ``{'key': 10}``).
    cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
        The cosmology class (or string name thereof) to use when constructing
        the cosmology instance. The class also provides default parameter
        values, filling in any non-mandatory arguments missing in 'table'.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance

    Examples
    --------
    To see loading a `~astropy.cosmology.Cosmology` from a Row with
    ``from_row``, we will first make a `~astropy.table.Row` using
    :func:`~astropy.cosmology.Cosmology.to_format`.

    >>> from astropy.cosmology import Cosmology, Planck18
    >>> cr = Planck18.to_format("astropy.row")
    >>> cr
    <Row index=0>
      cosmology     name        H0        Om0    Tcmb0    Neff    m_nu [3]    Ob0
                 km / (Mpc s)                K                       eV
        str13      str8      float64    float64 float64 float64   float64   float64
    ------------- -------- ------------ ------- ------- ------- ----------- -------
    FlatLambdaCDM Planck18        67.66 0.30966  2.7255   3.046 0.0 .. 0.06 0.04897

    Now this row can be used to load a new cosmological instance identical
    to the ``Planck18`` cosmology from which it was generated.

    >>> cosmo = Cosmology.from_format(cr, format="astropy.row")
    >>> cosmo
    FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
                  Tcmb0=2.7255 K, Neff=3.046, m_nu=[0.   0.   0.06] eV, Ob0=0.04897)
    """
    # special values
    name = row['name'] if 'name' in row.columns else None  # get name from column

    meta = defaultdict(dict, copy.deepcopy(row.meta))
    # Now need to add the Columnar metadata. This is only available on the
    # parent table. If Row is ever separated from Table, this should be moved
    # to ``to_table``.
    for col in row._table.itercols():
        if col.info.meta:  # Only add metadata if not empty
            meta[col.name].update(col.info.meta)

    # turn row into mapping, filling cosmo if not in a column
    mapping = dict(row)
    mapping["name"] = name
    mapping.setdefault("cosmology", meta.pop("cosmology", None))
    mapping["meta"] = dict(meta)

    # build cosmology from map
    return from_mapping(mapping, move_to_meta=move_to_meta, cosmology=cosmology)


def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable):
    """Serialize the cosmology into a `~astropy.table.Row`.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology` subclass instance
    *args
        Not used. Needed for compatibility with
        `~astropy.io.registry.UnifiedReadWriteMethod`
    table_cls : type (optional, keyword-only)
        Astropy :class:`~astropy.table.Table` class or subclass type to use.
        Default is :class:`~astropy.table.QTable`.
    cosmology_in_meta : bool
        Whether to put the cosmology class in the Table metadata (if `True`)
        or as the first column (if `False`, default).

    Returns
    -------
    `~astropy.table.Row`
        With columns for the cosmology parameters, and metadata in the Table's
        ``meta`` attribute. The cosmology class name will either be a column
        or in ``meta``, depending on 'cosmology_in_meta'.

    Examples
    --------
    A Cosmology as a `~astropy.table.Row` will have the cosmology's name and
    parameters as columns.

    >>> from astropy.cosmology import Planck18
    >>> cr = Planck18.to_format("astropy.row")
    >>> cr
    <Row index=0>
      cosmology     name        H0        Om0    Tcmb0    Neff    m_nu [3]    Ob0
                 km / (Mpc s)                K                       eV
        str13      str8      float64    float64 float64 float64   float64   float64
    ------------- -------- ------------ ------- ------- ------- ----------- -------
    FlatLambdaCDM Planck18        67.66 0.30966  2.7255   3.046 0.0 .. 0.06 0.04897

    The cosmological class and other metadata, e.g. a paper reference, are in
    the Table's metadata.
    """
    from .table import to_table

    table = to_table(cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta)
    return table[0]  # extract row from table


def row_identify(origin, format, *args, **kwargs):
    """Identify if object uses the `~astropy.table.Row` format.

    Returns
    -------
    bool
    """
    itis = False
    if origin == "read":
        itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))

    return itis


# ===================================================================
# Register

convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
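# ----------------------------------------------------------------------
# Round-trip sketch for the Row format registered above, mirroring this
# module's docstrings.
from astropy.cosmology import Cosmology, Planck18

cr = Planck18.to_format("astropy.row")                   # Row of a 1-row QTable
cosmo = Cosmology.from_format(cr, format="astropy.row")  # back to Planck18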
070f80f061a7d265bdd9aa07673170ddf2996e0287c60045c12eff57ffc695a7
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
The following are private functions. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
"""

from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.cosmology.connect import convert_registry

__all__ = []  # nothing is publicly scoped


def from_cosmology(cosmo, /, **kwargs):
    """Return the |Cosmology| unchanged.

    Parameters
    ----------
    cosmo : `~astropy.cosmology.Cosmology`
        The cosmology to return.
    **kwargs
        This argument is required for compatibility with the standard set of
        keyword arguments in format
        `~astropy.cosmology.Cosmology.from_format`, e.g. "cosmology". If
        "cosmology" is included and is not `None`, ``cosmo`` is checked for
        correctness.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance
        Just ``cosmo`` passed through.

    Raises
    ------
    TypeError
        If the |Cosmology| object is not an instance of ``cosmo`` (and
        ``cosmology`` is not `None`).
    """
    # Check argument `cosmology`
    cosmology = kwargs.get("cosmology")
    if isinstance(cosmology, str):
        cosmology = _COSMOLOGY_CLASSES[cosmology]
    if cosmology is not None and not isinstance(cosmo, cosmology):
        raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")

    return cosmo


def to_cosmology(cosmo, *args):
    """Return the |Cosmology| unchanged.

    Parameters
    ----------
    cosmo : `~astropy.cosmology.Cosmology`
        The cosmology to return.
    *args
        Not used.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance
        Just ``cosmo`` passed through.
    """
    return cosmo


def cosmology_identify(origin, format, *args, **kwargs):
    """Identify if object is a `~astropy.cosmology.Cosmology`.

    Returns
    -------
    bool
    """
    itis = False
    if origin == "read":
        itis = isinstance(args[1], Cosmology) and (format in (None, "astropy.cosmology"))

    return itis


# ===================================================================
# Register

convert_registry.register_reader("astropy.cosmology", Cosmology, from_cosmology)
convert_registry.register_writer("astropy.cosmology", Cosmology, to_cosmology)
convert_registry.register_identifier("astropy.cosmology", Cosmology, cosmology_identify)
4027dbbe8a4e7d489367b0ab7fec33828972602365e6f9577ab4f0f67a54e8a8
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import numpy as np from astropy.cosmology.connect import convert_registry from astropy.cosmology.core import Cosmology from astropy.table import QTable, Table, Column from .mapping import to_mapping from .row import from_row from .utils import convert_parameter_to_column def from_table(table, index=None, *, move_to_meta=False, cosmology=None): """Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|. Parameters ---------- table : `~astropy.table.Table` The object to parse into a |Cosmology|. index : int, str, or None, optional Needed to select the row in tables with multiple rows. ``index`` can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. Returns ------- `~astropy.cosmology.Cosmology` subclass instance Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a Table with ``from_table``, we will first make a |QTable| using :func:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu [3] Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64 float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Now this table can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(ct, format="astropy.table") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Specific cosmology classes can be used to parse the data. The class' default parameter values are used to fill in any information missing in the data. >>> from astropy.cosmology import FlatLambdaCDM >>> del ct["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(ct) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) For tables with multiple rows of cosmological parameters, the ``index`` argument is needed to select the correct row. The index can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. Here is an example where ``index`` is needed and can be either an integer (for the row number) or the name of one of the cosmologies, e.g. 'Planck15'. 
>>> from astropy.cosmology import Planck13, Planck15, Planck18 >>> from astropy.table import vstack >>> cts = vstack([c.to_format("astropy.table") ... for c in (Planck13, Planck15, Planck18)], ... metadata_conflicts='silent') >>> cts <QTable length=3> name H0 Om0 Tcmb0 Neff m_nu [3] Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64 float64 -------- ------------ ------- ------- ------- ----------- -------- Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252 Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486 Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(cts, index=1, format="astropy.table") >>> cosmo == Planck15 True For further examples, see :doc:`astropy:cosmology/io`. """ # Get row from table # string index uses the indexed column on the table to find the row index. if isinstance(index, str): if not table.indices: # no indexing column, find by string match indices = np.where(table['name'] == index)[0] else: # has indexing column indices = table.loc_indices[index] # need to convert to row index (int) if isinstance(indices, (int, np.integer)): # loc_indices index = indices elif len(indices) == 1: # only happens w/ np.where index = indices[0] elif len(indices) == 0: # matches from loc_indices raise KeyError(f"No matches found for key {indices}") else: # like the Highlander, there can be only 1 Cosmology raise ValueError(f"more than one cosmology found for key {indices}") # no index is needed for a 1-row table. For a multi-row table... if index is None: if len(table) != 1: # multi-row table and no index raise ValueError("need to select a specific row (e.g. index=1) when " "constructing a Cosmology from a multi-row table.") else: # single-row table index = 0 row = table[index] # index is now the row index (int) # parse row to cosmo return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology) def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True): """Serialize the cosmology into a `~astropy.table.QTable`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` class or subclass type to return. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool Whether to put the cosmology class in the Table metadata (if `True`, default) or as the first column (if `False`). Returns ------- `~astropy.table.QTable` With columns for the cosmology parameters, and metadata and cosmology class name in the Table's ``meta`` attribute Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` Examples -------- A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and parameters as columns. >>> from astropy.cosmology import Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu [3] Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64 float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 The cosmological class and other metadata, e.g. a paper reference, are in the Table's metadata. 
>>> ct.meta OrderedDict([..., ('cosmology', 'FlatLambdaCDM')]) To move the cosmology class from the metadata to a Table row, set the ``cosmology_in_meta`` argument to `False`: >>> Planck18.to_format("astropy.table", cosmology_in_meta=False) <QTable length=1> cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64 float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Astropy recommends `~astropy.table.QTable` for tables with `~astropy.units.Quantity` columns. However, the returned type may be overridden using the ``cls`` argument: >>> from astropy.table import Table >>> Planck18.to_format("astropy.table", cls=Table) <Table length=1> ... """ if not issubclass(cls, Table): raise TypeError(f"'cls' must be a (sub)class of Table, not {cls}") # Start by getting a map representation. data = to_mapping(cosmology) data["cosmology"] = data["cosmology"].__qualname__ # change to str # Metadata meta = data.pop("meta") # remove the meta if cosmology_in_meta: meta["cosmology"] = data.pop("cosmology") # Need to turn everything into something Table can process: # - Column for Parameter # - list for anything else cosmo_cls = cosmology.__class__ for k, v in data.items(): if k in cosmology.__parameters__: col = convert_parameter_to_column(getattr(cosmo_cls, k), v, cosmology.meta.get(k)) else: col = Column([v]) data[k] = col tbl = cls(data, meta=meta) tbl.add_index("name", unique=True) return tbl def table_identify(origin, format, *args, **kwargs): """Identify if object uses the Table format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Table) and (format in (None, "astropy.table")) return itis # =================================================================== # Register convert_registry.register_reader("astropy.table", Cosmology, from_table) convert_registry.register_writer("astropy.table", Cosmology, to_table) convert_registry.register_identifier("astropy.table", Cosmology, table_identify)
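# A minimal round-trip sketch for the "astropy.table" format registered
# above; illustrative usage only, assuming astropy >= 5.0 is installed.
from astropy.cosmology import Cosmology, Planck15, Planck18
from astropy.table import vstack

# Cosmology -> QTable -> Cosmology; ``table_identify`` lets ``from_format``
# recognize the QTable without an explicit ``format`` argument.
tbl = Planck18.to_format("astropy.table")
assert Cosmology.from_format(tbl) == Planck18

# Multi-row tables need ``index`` (a row number, or a value in "name").
stacked = vstack([c.to_format("astropy.table") for c in (Planck15, Planck18)],
                 metadata_conflicts="silent")
assert Cosmology.from_format(stacked, index="Planck15") == Planck15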
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**. """ # Import to register with the I/O machinery from . import ecsv, mapping, model, row, table, yaml # noqa: F403
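# A short sketch of what the imports above accomplish: each submodule
# registers its readers/writers/identifiers with the Cosmology registries at
# import time. ``get_formats`` is assumed here from the astropy >= 5.0
# unified-I/O registry API.
from astropy.cosmology import Cosmology
from astropy.cosmology.connect import convert_registry

print(convert_registry.get_formats(Cosmology))
# expected rows include "astropy.table", "mapping", "yaml", ...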
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The following are private functions, included here **FOR REFERENCE ONLY** since the io registry cannot be displayed. These functions are registered into :meth:`~astropy.cosmology.Cosmology.to_format` and :meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed via these methods. """ # this is shown in the docs. import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology from astropy.cosmology.connect import convert_registry from astropy.io.misc.yaml import AstropyDumper, AstropyLoader, dump, load from .mapping import from_mapping __all__ = [] # nothing is publicly scoped ############################################################################## # Serializer Functions # these do Cosmology <-> YAML through a modified dictionary representation of # the Cosmology object. The Unified-I/O functions are just thin wrappers that # call these YAML functions. def yaml_representer(tag): """:mod:`yaml` representation of |Cosmology| object. Parameters ---------- tag : str The class tag, e.g. '!astropy.cosmology.LambdaCDM' Returns ------- representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str] Function to construct :mod:`yaml` representation of |Cosmology| object. """ def representer(dumper, obj): """Cosmology yaml representer function for {}. Parameters ---------- dumper : `~astropy.io.misc.yaml.AstropyDumper` obj : `~astropy.cosmology.Cosmology` Returns ------- str :mod:`yaml` representation of |Cosmology| object. """ # convert to mapping map = obj.to_format("mapping") # remove the cosmology class info. It's already recorded in `tag` map.pop("cosmology") # make the metadata serializable in an order-preserving way. map["meta"] = tuple(map["meta"].items()) return dumper.represent_mapping(tag, map) representer.__doc__ = representer.__doc__.format(tag) return representer def yaml_constructor(cls): """|Cosmology| object from :mod:`yaml` representation. Parameters ---------- cls : type The class type, e.g. `~astropy.cosmology.LambdaCDM`. Returns ------- constructor : callable Function to construct |Cosmology| object from :mod:`yaml` representation. """ def constructor(loader, node): """Cosmology yaml constructor function. Parameters ---------- loader : `~astropy.io.misc.yaml.AstropyLoader` node : `yaml.nodes.MappingNode` yaml representation of |Cosmology| object. Returns ------- `~astropy.cosmology.Cosmology` subclass instance """ # create mapping from YAML node map = loader.construct_mapping(node) # restore metadata to dict map["meta"] = dict(map["meta"]) # get cosmology class qualified name from node cosmology = str(node.tag).split(".")[-1] # create Cosmology from mapping return from_mapping(map, move_to_meta=False, cosmology=cosmology) return constructor def register_cosmology_yaml(cosmo_cls): """Register :mod:`yaml` for Cosmology class. Parameters ---------- cosmo_cls : `~astropy.cosmology.Cosmology` class """ tag = f"!{cosmo_cls.__module__}.{cosmo_cls.__qualname__}" AstropyDumper.add_representer(cosmo_cls, yaml_representer(tag)) AstropyLoader.add_constructor(tag, yaml_constructor(cosmo_cls)) ############################################################################## # Unified-I/O Functions def from_yaml(yml, *, cosmology=None): """Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.
Parameters ---------- yml : str :mod:`yaml` representation of |Cosmology| object cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only) The expected cosmology class (or string name thereof). This argument is only checked for correctness if not `None`. Returns ------- `~astropy.cosmology.Cosmology` subclass instance Raises ------ TypeError If the |Cosmology| object loaded from ``yml`` is not an instance of ``cosmology`` (and ``cosmology`` is not `None`). """ with u.add_enabled_units(cu): cosmo = load(yml) # Check argument `cosmology`, if not None # This kwarg is required for compatibility with |Cosmology.from_format| if isinstance(cosmology, str): cosmology = _COSMOLOGY_CLASSES[cosmology] if cosmology is not None and not isinstance(cosmo, cosmology): raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.") return cosmo def to_yaml(cosmology, *args): """Return the cosmology class, parameters, and metadata as a :mod:`yaml` object. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` Returns ------- str :mod:`yaml` representation of |Cosmology| object """ return dump(cosmology) # ``read`` cannot handle non-path strings. # TODO! this says there should be different types of I/O registries. # not just hacking object conversion on top of file I/O. # def yaml_identify(origin, format, *args, **kwargs): # """Identify if object uses the yaml format. # # Returns # ------- # bool # """ # itis = False # if origin == "read": # itis = isinstance(args[1], str) and args[1][0].startswith("!") # itis &= format in (None, "yaml") # # return itis # =================================================================== # Register for cosmo_cls in _COSMOLOGY_CLASSES.values(): register_cosmology_yaml(cosmo_cls) convert_registry.register_reader("yaml", Cosmology, from_yaml) convert_registry.register_writer("yaml", Cosmology, to_yaml) # convert_registry.register_identifier("yaml", Cosmology, yaml_identify)
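# A small round-trip sketch for the "yaml" converters registered above,
# assuming astropy >= 5.0. Since no identifier is registered (see the
# commented-out ``yaml_identify``), ``format="yaml"`` must be given
# explicitly when converting back.
from astropy.cosmology import Cosmology, Planck18

yml = Planck18.to_format("yaml")  # a YAML string with a !astropy... class tag
cosmo = Cosmology.from_format(yml, format="yaml")
assert cosmo == Planck18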
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The following are private functions, included here **FOR REFERENCE ONLY** since the io registry cannot be displayed. These functions are registered into :meth:`~astropy.cosmology.Cosmology.to_format` and :meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed via these methods. """ # this is shown in the docs. import copy from collections.abc import Mapping import numpy as np from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology from astropy.cosmology.connect import convert_registry __all__ = [] # nothing is publicly scoped def from_mapping(map, *, move_to_meta=False, cosmology=None): """Load `~astropy.cosmology.Cosmology` from mapping object. Parameters ---------- map : mapping Arguments into the class -- like "name" or "meta". If 'cosmology' is None, must have field "cosmology" which can be either the string name of the cosmology class (e.g. "FlatLambdaCDM") or the class itself. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'map'. Returns ------- `~astropy.cosmology.Cosmology` subclass instance Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a dictionary with ``from_mapping``, we will first make a mapping using :meth:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> cm = Planck18.to_format('mapping') >>> cm {'cosmology': <class 'astropy.cosmology.flrw.FlatLambdaCDM'>, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, 'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046, 'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897, 'meta': ... Now this dict can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(cm, format="mapping") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Specific cosmology classes can be used to parse the data. The class' default parameter values are used to fill in any information missing in the data. >>> from astropy.cosmology import FlatLambdaCDM >>> del cm["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(cm) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) """ params = dict(map) # so we are guaranteed to have a poppable map # get cosmology # 1st from argument. Allows for override of the cosmology, if on file. # 2nd from params. This MUST have the cosmology if 'kwargs' did not. 
if cosmology is None: cosmology = params.pop("cosmology") else: params.pop("cosmology", None) # pop, but don't use # if string, parse to class if isinstance(cosmology, str): cosmology = _COSMOLOGY_CLASSES[cosmology] # select arguments from mapping that are in the cosmo's signature. ba = cosmology._init_signature.bind_partial() # blank set of args ba.apply_defaults() # fill in the defaults for k in cosmology._init_signature.parameters.keys(): if k in params: # transfer argument, if in params ba.arguments[k] = params.pop(k) # deal with remaining params. If there is a **kwargs use that, else # allow to transfer to metadata. Raise TypeError if can't. lastp = tuple(cosmology._init_signature.parameters.values())[-1] if lastp.kind == 4: # variable keyword-only ba.arguments[lastp.name] = params elif move_to_meta: # prefers current meta, which was explicitly set meta = ba.arguments["meta"] or {} # (None -> dict) ba.arguments["meta"] = {**params, **meta} elif params: raise TypeError(f"there are unused parameters {params}.") # else: pass # no kwargs, no move-to-meta, and all the params are used return cosmology(*ba.args, **ba.kwargs) def to_mapping(cosmology, *args, cls=dict, cosmology_as_str=False, move_from_meta=False): """Return the cosmology class, parameters, and metadata as a `dict`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` cls : type (optional, keyword-only) `dict` or `collections.Mapping` subclass. The mapping type to return. Default is `dict`. cosmology_as_str : bool (optional, keyword-only) Whether the cosmology value is the class (if `False`, default) or the semi-qualified name (if `True`). move_from_meta : bool (optional, keyword-only) Whether to add the Cosmology's metadata as an item to the mapping (if `False`, default) or to merge with the rest of the mapping, preferring the original values (if `True`) Returns ------- dict with key-values for the cosmology parameters and also: - 'cosmology' : the class - 'meta' : the contents of the cosmology's metadata attribute. If ``move_from_meta`` is `True`, this key is missing and the contained metadata are added to the main `dict`. Examples -------- A Cosmology as a mapping will have the cosmology's name and parameters as items, and the metadata as a nested dictionary. >>> from astropy.cosmology import Planck18 >>> Planck18.to_format('mapping') {'cosmology': <class 'astropy.cosmology.flrw.FlatLambdaCDM'>, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, 'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046, 'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897, 'meta': ... The dictionary type may be changed with the ``cls`` keyword argument: >>> from collections import OrderedDict >>> Planck18.to_format('mapping', cls=OrderedDict) OrderedDict([('cosmology', <class 'astropy.cosmology.flrw.FlatLambdaCDM'>), ('name', 'Planck18'), ('H0', <Quantity 67.66 km / (Mpc s)>), ('Om0', 0.30966), ('Tcmb0', <Quantity 2.7255 K>), ('Neff', 3.046), ('m_nu', <Quantity [0. , 0. , 0.06] eV>), ('Ob0', 0.04897), ('meta', ... Sometimes it is more useful to have the name of the cosmology class, not the object itself. The keyword argument ``cosmology_as_str`` may be used: >>> Planck18.to_format('mapping', cosmology_as_str=True) {'cosmology': 'FlatLambdaCDM', ... The metadata is normally included as a nested mapping. To move the metadata into the main mapping, use the keyword argument ``move_from_meta``. 
This kwarg inverts ``move_to_meta`` in ``Cosmology.from_format(map, format="mapping", move_to_meta=...)``, where extra items are moved to the metadata (if the cosmology constructor does not have a variable keyword argument -- ``**kwargs``). >>> from astropy.cosmology import Planck18 >>> Planck18.to_format('mapping', move_from_meta=True) {'cosmology': <class 'astropy.cosmology.flrw.FlatLambdaCDM'>, 'name': 'Planck18', 'Oc0': 0.2607, 'n': 0.9665, 'sigma8': 0.8102, ... """ if not issubclass(cls, (dict, Mapping)): raise TypeError(f"'cls' must be a (sub)class of dict or Mapping, not {cls}") m = cls() # start with the cosmology class & name m["cosmology"] = cosmology.__class__.__qualname__ if cosmology_as_str else cosmology.__class__ m["name"] = cosmology.name # here only for dict ordering meta = copy.deepcopy(cosmology.meta) # metadata (mutable) if move_from_meta: # Merge the mutable metadata. Since params are added later they will # be preferred in cases of overlapping keys. Likewise, need to pop # cosmology and name from meta. meta.pop("cosmology", None) meta.pop("name", None) m.update(meta) # Add all the immutable inputs m.update({k: v for k, v in cosmology._init_arguments.items() if k not in ("meta", "name")}) # Lastly, add the metadata, if it hasn't been merged already (above) if not move_from_meta: m["meta"] = meta # TODO? should meta be type(cls) return m def mapping_identify(origin, format, *args, **kwargs): """Identify if object uses the mapping format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Mapping) and (format in (None, "mapping")) return itis # =================================================================== # Register convert_registry.register_reader("mapping", Cosmology, from_mapping) convert_registry.register_writer("mapping", Cosmology, to_mapping) convert_registry.register_identifier("mapping", Cosmology, mapping_identify)
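# A minimal sketch of the mapping round trip described above, assuming
# astropy >= 5.0.
from astropy.cosmology import Cosmology, Planck18

m = Planck18.to_format("mapping")
assert Cosmology.from_format(m) == Planck18  # mapping_identify matches a dict

# ``move_from_meta`` on the way out pairs with ``move_to_meta`` on the way
# back in: the flattened metadata items are swept into ``meta`` again.
flat = Planck18.to_format("mapping", move_from_meta=True)
cosmo = Cosmology.from_format(flat, move_to_meta=True)
assert cosmo == Planck18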
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy.table import Column from astropy.modeling import Parameter as ModelParameter def convert_parameter_to_column(parameter, value, meta=None): """Convert a |Cosmology| Parameter to a Table |Column|. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. Returns ------- `astropy.table.Column` """ format = None if value is None else parameter.format_spec shape = (1,) + np.shape(value) # minimum of 1d col = Column(data=np.reshape(value, shape), name=parameter.name, dtype=None, # inferred from the data description=parameter.__doc__, format=format, meta=meta) return col def convert_parameter_to_model_parameter(parameter, value, meta=None): """Convert a Cosmology Parameter to a Model Parameter. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. This function will use any of: 'getter', 'setter', 'fixed', 'tied', 'min', 'max', 'bounds', 'prior', 'posterior'. Returns ------- `astropy.modeling.Parameter` """ # Get from meta the information relevant to the Model parameter extra = {k: v for k, v in (meta or {}).items() if k in ('getter', 'setter', 'fixed', 'tied', 'min', 'max', 'bounds', 'prior', 'posterior')} return ModelParameter(description=parameter.__doc__, default=value, unit=getattr(value, "unit", None), **extra)
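# An illustrative sketch of the two helpers above, using the ``H0``
# Parameter of Planck18. The module path ``astropy.cosmology.io.utils`` and
# astropy >= 5.0 behavior are assumptions; these helpers are private API.
from astropy.cosmology import FlatLambdaCDM, Planck18
from astropy.cosmology.io.utils import (convert_parameter_to_column,
                                        convert_parameter_to_model_parameter)

# Accessing the Parameter on the *class* returns the descriptor itself,
# while accessing it on the instance returns the value.
col = convert_parameter_to_column(FlatLambdaCDM.H0, Planck18.H0)
print(col.name, col.shape)  # H0 (1,)

mp = convert_parameter_to_model_parameter(FlatLambdaCDM.H0, Planck18.H0)
print(mp.unit)  # km / (Mpc s)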
# Licensed under a 3-clause BSD style license - see LICENSE.rst import astropy.cosmology.units as cu import astropy.units as u from astropy.table import QTable from astropy.cosmology.connect import readwrite_registry from astropy.cosmology.core import Cosmology from .table import from_table, to_table def read_ecsv(filename, index=None, *, move_to_meta=False, cosmology=None, **kwargs): """Read a `~astropy.cosmology.Cosmology` from an ECSV file. Parameters ---------- filename : path-like or file-like From where to read the Cosmology. index : int, str, or None, optional Needed to select the row in tables with multiple rows. ``index`` can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. **kwargs Passed to :attr:`astropy.table.QTable.read` Returns ------- `~astropy.cosmology.Cosmology` subclass instance """ kwargs["format"] = "ascii.ecsv" with u.add_enabled_units(cu): table = QTable.read(filename, **kwargs) # build cosmology from table return from_table(table, index=index, move_to_meta=move_to_meta, cosmology=cosmology) def write_ecsv(cosmology, file, *, overwrite=False, cls=QTable, cosmology_in_meta=True, **kwargs): """Serialize the cosmology into an ECSV file. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance file : path-like or file-like Location to save the serialized cosmology. overwrite : bool Whether to overwrite the file, if it exists. cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` (sub)class to use when writing. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool Whether to put the cosmology class in the Table metadata (if `True`, default) or as the first column (if `False`). **kwargs Passed to ``cls.write`` Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` """ table = to_table(cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta) kwargs["format"] = "ascii.ecsv" table.write(file, overwrite=overwrite, **kwargs) def ecsv_identify(origin, filepath, fileobj, *args, **kwargs): """Identify if object uses the ECSV format. Returns ------- bool """ return filepath is not None and filepath.endswith(".ecsv") # =================================================================== # Register readwrite_registry.register_reader("ascii.ecsv", Cosmology, read_ecsv) readwrite_registry.register_writer("ascii.ecsv", Cosmology, write_ecsv) readwrite_registry.register_identifier("ascii.ecsv", Cosmology, ecsv_identify)
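# A minimal write/read sketch for the ECSV hooks registered above; assumes
# astropy >= 5.0 and a writable temporary directory.
import os
import tempfile

from astropy.cosmology import Cosmology, Planck18

path = os.path.join(tempfile.mkdtemp(), "planck18.ecsv")
Planck18.write(path)          # ``ecsv_identify`` matches the .ecsv suffix
cosmo = Cosmology.read(path)
assert cosmo == Planck18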
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw`.""" ############################################################################## # IMPORTS # STDLIB import abc import copy # THIRD PARTY import pytest import numpy as np import astropy.constants as const # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import (FLRW, FlatLambdaCDM, Flatw0waCDM, FlatwCDM, LambdaCDM, Planck18, w0waCDM, w0wzCDM, wCDM, wpwaCDM) from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.flrw import H0units_to_invs, a_B_c2, critdens_const, ellipkinc, hyp2f1, quad from astropy.cosmology.parameter import Parameter from astropy.utils.compat.optional_deps import HAS_SCIPY from .conftest import get_redshift_methods from .test_core import CosmologySubclassTest as CosmologyTest from .test_core import FlatCosmologyMixinTest, ParameterTestMixin, invalid_zs, valid_zs ############################################################################## # SETUP / TEARDOWN class SubFLRW(FLRW): def w(self, z): return super().w(z) ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed") def test_optional_deps_functions(): """Test stand-in functions when optional dependencies not installed.""" with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"): quad() with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): ellipkinc() with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): hyp2f1() ############################################################################## class ParameterH0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` H0 on a Cosmology. H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_H0(self, cosmo_cls, cosmo): """Test Parameter ``H0``.""" unit = u.Unit("km/(s Mpc)") # on the class assert isinstance(cosmo_cls.H0, Parameter) assert "Hubble constant" in cosmo_cls.H0.__doc__ assert cosmo_cls.H0.unit == unit # validation assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls.H0.validate(cosmo, [1, 2]) # on the instance assert cosmo.H0 is cosmo._H0 assert cosmo.H0 == self._cls_args["H0"] assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit def test_init_H0(self, cosmo_cls, ba): """Test initialization for values of ``H0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0 == ba.arguments["H0"] # also without units ba.arguments["H0"] = ba.arguments["H0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0.value == ba.arguments["H0"] # fails for non-scalar ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc) with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOm0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology. Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. 
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Om0(self, cosmo_cls, cosmo): """Test Parameter ``Om0``.""" # on the class assert isinstance(cosmo_cls.Om0, Parameter) assert "Omega matter" in cosmo_cls.Om0.__doc__ # validation assert cosmo_cls.Om0.validate(cosmo, 1) == 1 assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Om0 cannot be negative"): cosmo_cls.Om0.validate(cosmo, -1) # on the instance assert cosmo.Om0 is cosmo._Om0 assert cosmo.Om0 == self._cls_args["Om0"] assert isinstance(cosmo.Om0, float) def test_init_Om0(self, cosmo_cls, ba): """Test initialization for values of ``Om0``.""" # test that it works with units ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # also without units ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # fails for negative numbers ba.arguments["Om0"] = -0.27 with pytest.raises(ValueError, match="Om0 cannot be negative."): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOde0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" assert isinstance(cosmo_cls.Ode0, Parameter) assert "Omega dark energy" in cosmo_cls.Ode0.__doc__ def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo): """Test Parameter ``Ode0`` validation.""" assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1 assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0 with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls.Ode0.validate(cosmo, 10 * u.km) def test_Ode0(self, cosmo): """Test Parameter ``Ode0`` validation.""" # if Ode0 is a parameter, test its value assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == self._cls_args["Ode0"] assert isinstance(cosmo.Ode0, float) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" # test that it works with units ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # also without units ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # Setting param to 0 respects that. Note this test uses ``Ode()``. ba.arguments["Ode0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0]) assert u.allclose(cosmo.Ode(1), 0) # Must be dimensionless or have no units. Errors otherwise. ba.arguments["Ode0"] = 10 * u.km with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterTcmb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology. Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Tcmb0(self, cosmo_cls, cosmo): """Test Parameter ``Tcmb0``.""" # on the class assert isinstance(cosmo_cls.Tcmb0, Parameter) assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__ assert cosmo_cls.Tcmb0.unit == u.K # validation assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls.Tcmb0.validate(cosmo, [1, 2]) # on the instance assert cosmo.Tcmb0 is cosmo._Tcmb0 assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"] assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K def test_init_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``Tcmb0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0 == ba.arguments["Tcmb0"] # also without units ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"] # must be a scalar ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K) with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterNeffTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Neff on a Cosmology. Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Neff(self, cosmo_cls, cosmo): """Test Parameter ``Neff``.""" # on the class assert isinstance(cosmo_cls.Neff, Parameter) assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__ # validation assert cosmo_cls.Neff.validate(cosmo, 1) == 1 assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Neff cannot be negative"): cosmo_cls.Neff.validate(cosmo, -1) # on the instance assert cosmo.Neff is cosmo._Neff assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04) assert isinstance(cosmo.Neff, float) def test_init_Neff(self, cosmo_cls, ba): """Test initialization for values of ``Neff``.""" # test that it works with units ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] # also without units ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] ba.arguments["Neff"] = -1 with pytest.raises(ValueError): cosmo_cls(*ba.args, **ba.kwargs) class Parameterm_nuTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology. m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_m_nu(self, cosmo_cls, cosmo): """Test Parameter ``m_nu``.""" # on the class assert isinstance(cosmo_cls.m_nu, Parameter) assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__ assert cosmo_cls.m_nu.unit == u.eV assert cosmo_cls.m_nu.equivalencies == u.mass_energy() assert cosmo_cls.m_nu.format_spec == "" # on the instance # assert cosmo.m_nu is cosmo._m_nu assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV) # set differently depending on the other inputs if cosmo.Tnu0.value == 0: assert cosmo.m_nu is None elif not cosmo._massivenu: # only massless assert u.allclose(cosmo.m_nu, 0 * u.eV) elif self._nmasslessnu == 0: # only massive assert cosmo.m_nu == cosmo._massivenu_mass else: # a mix -- the most complicated case assert u.allclose(cosmo.m_nu[:self._nmasslessnu], 0 * u.eV) assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass) def test_init_m_nu(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this requires the class to have a property ``has_massive_nu``. """ # Test that it works when m_nu has units. cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit) assert not cosmo.has_massive_nu assert cosmo.m_nu.unit == u.eV # explicitly check unit once. # And it works when m_nu doesn't have units. ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"]) assert not cosmo.has_massive_nu # A negative m_nu raises an exception. tba = copy.copy(ba) tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="invalid"): cosmo_cls(*tba.args, **tba.kwargs) def test_init_m_nu_and_Neff(self, cosmo_cls, ba): """Test initialization for values of ``m_nu`` and ``Neff``. Note this test requires ``Neff`` as constructor input, and a property ``has_massive_nu``. """ # Mismatch with Neff = wrong number of neutrinos tba = copy.copy(ba) tba.arguments["Neff"] = 4.05 tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="unexpected number of neutrino"): cosmo_cls(*tba.args, **tba.kwargs) # No neutrinos, but Neff tba.arguments["m_nu"] = 0 cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert not cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, 0 * u.eV) # TODO! move this test when create ``test_nu_relative_density`` assert u.allclose(cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6) # All massive neutrinos case, len from Neff tba.arguments["m_nu"] = 0.1 * u.eV cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV) def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this test requires ``Tcmb0`` as constructor input, and a property ``has_massive_nu``. """ # If Neff = 0, m_nu is None. tba = copy.copy(ba) tba.arguments["Neff"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu # If Tcmb0 = 0, m_nu is None tba = copy.copy(ba) tba.arguments["Tcmb0"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu class ParameterOb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology. 
Ob0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Ob0(self, cosmo_cls, cosmo): """Test Parameter ``Ob0``.""" # on the class assert isinstance(cosmo_cls.Ob0, Parameter) assert "Omega baryon;" in cosmo_cls.Ob0.__doc__ # validation assert cosmo_cls.Ob0.validate(cosmo, None) is None assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1 assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1 with pytest.raises(ValueError, match="Ob0 cannot be negative"): cosmo_cls.Ob0.validate(cosmo, -1) with pytest.raises(ValueError, match="baryonic density can not be larger"): cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1) # on the instance assert cosmo.Ob0 is cosmo._Ob0 assert cosmo.Ob0 == 0.03 def test_init_Ob0(self, cosmo_cls, ba): """Test initialization for values of ``Ob0``.""" # test that it works with units assert isinstance(ba.arguments["Ob0"], u.Quantity) cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == ba.arguments["Ob0"] # also without units ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == ba.arguments["Ob0"] # Setting param to 0 respects that. Note this test uses ``Ob()``. ba.arguments["Ob0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == 0.0 if not self.abstract_w: assert u.allclose(cosmo.Ob(1), 0) assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0]) # Negative Ob0 errors tba = copy.copy(ba) tba.arguments["Ob0"] = -0.04 with pytest.raises(ValueError, match="Ob0 cannot be negative"): cosmo_cls(*tba.args, **tba.kwargs) # Ob0 > Om0 errors tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1 with pytest.raises(ValueError, match="baryonic density can not be larger"): cosmo_cls(*tba.args, **tba.kwargs) # No baryons specified means baryon-specific methods fail. tba = copy.copy(ba) tba.arguments.pop("Ob0", None) cosmo = cosmo_cls(*tba.args, **tba.kwargs) with pytest.raises(ValueError): cosmo.Ob(1) # also means DM fraction is undefined with pytest.raises(ValueError): cosmo.Odm(1) # The default value is None assert cosmo_cls._init_signature.parameters["Ob0"].default is None class TestFLRW(CosmologyTest, ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin, ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin, ParameterOb0TestMixin): """Test :class:`astropy.cosmology.FLRW`.""" abstract_w = True def setup_class(self): """ Setup for testing. FLRW is abstract, so tests are done on a subclass. """ # make sure SubCosmology is known _COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW self.cls = SubFLRW self._cls_args = dict(H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one) self.cls_kwargs = dict(Tcmb0=3.0 * u.K, Ob0=0.03 * u.one, name=self.__class__.__name__, meta={"a": "b"}) def teardown_class(self): super().teardown_class(self) _COSMOLOGY_CLASSES.pop("SubFLRW", None) @pytest.fixture(scope="class") def nonflatcosmo(self): """A non-flat cosmology used in equivalence tests.""" return LambdaCDM(70, 0.4, 0.8) # =============================================================== # Method & Attribute Tests def test_init(self, cosmo_cls): """Test initialization.""" super().test_init(cosmo_cls) # TODO! tests for initializing calculated values, e.g. `h` # TODO! 
transfer tests for initializing neutrinos def test_init_Tcmb0_zeroing(self, cosmo_cls, ba): """Test if setting Tcmb0 parameter to 0 influences other parameters. TODO: consider moving this test to ``FLRWSubclassTest`` """ ba.arguments["Tcmb0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ogamma0 == 0.0 assert cosmo.Onu0 == 0.0 if not self.abstract_w: assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0]) assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0]) assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0]) assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0]) # --------------------------------------------------------------- # Properties def test_Odm0(self, cosmo_cls, cosmo): """Test property ``Odm0``.""" # on the class assert isinstance(cosmo_cls.Odm0, property) assert cosmo_cls.Odm0.fset is None # immutable # on the instance assert cosmo.Odm0 is cosmo._Odm0 # Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons. if cosmo.Ob0 is None: assert cosmo.Odm0 is None else: assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0) def test_Ok0(self, cosmo_cls, cosmo): """Test property ``Ok0``.""" # on the class assert isinstance(cosmo_cls.Ok0, property) assert cosmo_cls.Ok0.fset is None # immutable # on the instance assert cosmo.Ok0 is cosmo._Ok0 assert np.allclose(cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0)) def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" # on the class assert isinstance(cosmo_cls.is_flat, property) assert cosmo_cls.is_flat.fset is None # immutable # on the instance assert isinstance(cosmo.is_flat, bool) assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0)) def test_Tnu0(self, cosmo_cls, cosmo): """Test property ``Tnu0``.""" # on the class assert isinstance(cosmo_cls.Tnu0, property) assert cosmo_cls.Tnu0.fset is None # immutable # on the instance assert cosmo.Tnu0 is cosmo._Tnu0 assert cosmo.Tnu0.unit == u.K assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5) def test_has_massive_nu(self, cosmo_cls, cosmo): """Test property ``has_massive_nu``.""" # on the class assert isinstance(cosmo_cls.has_massive_nu, property) assert cosmo_cls.has_massive_nu.fset is None # immutable # on the instance if cosmo.Tnu0 == 0: assert cosmo.has_massive_nu is False else: assert cosmo.has_massive_nu is cosmo._massivenu def test_h(self, cosmo_cls, cosmo): """Test property ``h``.""" # on the class assert isinstance(cosmo_cls.h, property) assert cosmo_cls.h.fset is None # immutable # on the instance assert cosmo.h is cosmo._h assert np.allclose(cosmo.h, cosmo.H0.value / 100.0) def test_hubble_time(self, cosmo_cls, cosmo): """Test property ``hubble_time``.""" # on the class assert isinstance(cosmo_cls.hubble_time, property) assert cosmo_cls.hubble_time.fset is None # immutable # on the instance assert cosmo.hubble_time is cosmo._hubble_time assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr) def test_hubble_distance(self, cosmo_cls, cosmo): """Test property ``hubble_distance``.""" # on the class assert isinstance(cosmo_cls.hubble_distance, property) assert cosmo_cls.hubble_distance.fset is None # immutable # on the instance assert cosmo.hubble_distance is cosmo._hubble_distance assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc) def test_critical_density0(self, cosmo_cls, cosmo): """Test property ``critical_density0``.""" # on the class assert isinstance(cosmo_cls.critical_density0, property) assert cosmo_cls.critical_density0.fset is None # immutable # on the 
instance assert cosmo.critical_density0 is cosmo._critical_density0 assert cosmo.critical_density0.unit == u.g / u.cm ** 3 cd0value = critdens_const * (cosmo.H0.value * H0units_to_invs) ** 2 assert cosmo.critical_density0.value == cd0value def test_Ogamma0(self, cosmo_cls, cosmo): """Test property ``Ogamma0``.""" # on the class assert isinstance(cosmo_cls.Ogamma0, property) assert cosmo_cls.Ogamma0.fset is None # immutable # on the instance assert cosmo.Ogamma0 is cosmo._Ogamma0 # Ogamma \propto T^4 / rho_crit expect = a_B_c2 * cosmo.Tcmb0.value ** 4 / cosmo.critical_density0.value assert np.allclose(cosmo.Ogamma0, expect) # check absolute equality to 0 if Tcmb0 is 0 if cosmo.Tcmb0 == 0: assert cosmo.Ogamma0 == 0 def test_Onu0(self, cosmo_cls, cosmo): """Test property ``Onu0``.""" # on the class assert isinstance(cosmo_cls.Onu0, property) assert cosmo_cls.Onu0.fset is None # immutable # on the instance assert cosmo.Onu0 is cosmo._Onu0 # neutrino temperature <= photon temperature since the neutrinos # decouple first. if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive # check the expected formula assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0) # a sanity check on the ratio of neutrinos to photons # technically it could be 1, but not for any of the tested cases. assert cosmo.nu_relative_density(0) <= 1 elif cosmo.Tcmb0 == 0: assert cosmo.Onu0 == 0 else: # check the expected formula assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0 # and check compatibility with nu_relative_density assert np.allclose(cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff) def test_Otot0(self, cosmo): """Test :attr:`astropy.cosmology.FLRW.Otot0`.""" assert cosmo.Otot0 == cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0 # --------------------------------------------------------------- # Methods def test_w(self, cosmo): """Test abstract :meth:`astropy.cosmology.FLRW.w`.""" with pytest.raises(NotImplementedError, match="not implemented"): cosmo.w(1) def test_Otot(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): assert cosmo.Otot(1) def test_efunc_vs_invefunc(self, cosmo): """ Test that efunc and inv_efunc give inverse values. Here they just fail b/c no ``w(z)`` or no scipy.
""" exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): cosmo.efunc(0.5) with pytest.raises(exception): cosmo.inv_efunc(0.5) # --------------------------------------------------------------- # from Cosmology def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # don't change any values kwargs = cosmo._init_arguments.copy() kwargs.pop("name", None) # make sure not setting name c = cosmo.clone(**kwargs) assert c.__class__ == cosmo.__class__ assert c.name == cosmo.name + " (modified)" assert c.is_equivalent(cosmo) # change ``H0`` # Note that H0 affects Ode0 because it changes Ogamma0 c = cosmo.clone(H0=100) assert c.__class__ == cosmo.__class__ assert c.name == cosmo.name + " (modified)" assert c.H0.value == 100 for n in (set(cosmo.__parameters__) - {"H0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) # change multiple things c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops")) assert c.__class__ == cosmo.__class__ assert c.name == "new name" assert c.H0.value == 100 assert c.Tcmb0.value == 2.8 assert c.meta == {**cosmo.meta, **dict(zz="tops")} for n in (set(cosmo.__parameters__) - {"H0", "Tcmb0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value) def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest # test against a FlatFLRWMixin # case (3) in FLRW.is_equivalent if isinstance(cosmo, FlatLambdaCDM): assert cosmo.is_equivalent(Planck18) assert Planck18.is_equivalent(cosmo) else: assert not cosmo.is_equivalent(Planck18) assert not Planck18.is_equivalent(cosmo) class FLRWSubclassTest(TestFLRW): """ Test subclasses of :class:`astropy.cosmology.FLRW`. This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and subclasses must override some methods. """ abstract_w = False @abc.abstractmethod def setup_class(self): """Setup for testing.""" super().setup_class(self) # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = get_redshift_methods(FLRW, allow_private=True, allow_z2=False) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" with pytest.raises(exc): getattr(cosmo, method)(z) @pytest.mark.parametrize("z", valid_zs) @abc.abstractmethod def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.w`. Since ``w`` is abstract, each test class needs to define further tests. 
""" # super().test_w(cosmo, z) # NOT b/c abstract `w(z)` w = cosmo.w(z) assert np.shape(w) == np.shape(z) # test same shape assert u.Quantity(w).unit == u.one # test no units or dimensionless # ------------------------------------------- @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" # super().test_Otot(cosmo) # NOT b/c abstract `w(z)` assert np.allclose( cosmo.Otot(z), cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z)) # --------------------------------------------------------------- def test_efunc_vs_invefunc(self, cosmo): """Test that ``efunc`` and ``inv_efunc`` give inverse values. Note that the test doesn't need scipy because it doesn't need to call ``de_density_scale``. """ # super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)` z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) # ----------------------------------------------------------------------------- class ParameterFlatOde0TestMixin(ParameterOde0TestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology. This will augment or override some tests in ``ParameterOde0TestMixin``. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" super().test_Parameter_Ode0(cosmo_cls) assert cosmo_cls.Ode0.derived == True def test_Ode0(self, cosmo): """Test no-longer-Parameter ``Ode0``.""" assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0) # Ode0 is not in the signature with pytest.raises(TypeError, match="Ode0"): cosmo_cls(*ba.args, **ba.kwargs, Ode0=1) class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin): """Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses. E.g to use this class:: class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW): ... """ def setup_class(self): """Setup for testing. Set up as for regular FLRW test class, but remove dark energy component since flat cosmologies are forbidden Ode0 as an argument, see ``test_init_subclass``. 
""" super().setup_class(self) self._cls_args.pop("Ode0") # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # class-level def test_init_subclass(self, cosmo_cls): """Test initializing subclass, mostly that can't have Ode0 in init.""" super().test_init_subclass(cosmo_cls) with pytest.raises(TypeError, match="subclasses of"): class HASOde0SubClass(cosmo_cls): def __init__(self, Ode0): pass _COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None) # --------------------------------------------------------------- # instance-level def test_init(self, cosmo_cls): super().test_init(cosmo_cls) cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs) assert cosmo._Ok0 == 0.0 assert cosmo._Ode0 == 1.0 - (cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0) def test_Ok0(self, cosmo_cls, cosmo): """Test property ``Ok0``.""" super().test_Ok0(cosmo_cls, cosmo) # for flat cosmologies, Ok0 is not *close* to 0, it *is* 0 assert cosmo.Ok0 == 0.0 def test_Otot0(self, cosmo): """Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1.""" super().test_Otot0(cosmo) # for flat cosmologies, Otot0 is not *close* to 1, it *is* 1 assert cosmo.Otot0 == 1.0 @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1.""" super().test_Otot(cosmo, z) # for flat cosmologies, Otot is 1, within precision. assert u.allclose(cosmo.Otot(z), 1.0) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', FLRWSubclassTest._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # --------------------------------------------------------------- def test_is_equivalent(self, cosmo, nonflatcosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to TestFLRW # against non-flat Cosmology assert not cosmo.is_equivalent(nonflatcosmo) assert not nonflatcosmo.is_equivalent(cosmo) # non-flat version of class nonflat_cosmo_cls = cosmo.__class__.mro()[3] # keys check in `test_is_equivalent_nonflat_class_different_params` # non-flat nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs) assert not nonflat.is_equivalent(cosmo) assert not cosmo.is_equivalent(nonflat) # flat, but not FlatFLRWMixin flat = nonflat_cosmo_cls(*self.cls_args, Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0, **self.cls_kwargs) flat._Ok0 = 0.0 assert flat.is_equivalent(cosmo) assert cosmo.is_equivalent(flat) def test_repr(self, cosmo_cls, cosmo): """ Test method ``.__repr__()``. Skip non-flat superclass test. e.g. 
`TestFlatLambdaCDM` -> `FlatFLRWMixinTest` vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest` """ FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo) # test eliminated Ode0 from parameters assert "Ode0" not in repr(cosmo) # ----------------------------------------------------------------------------- class TestLambdaCDM(FLRWSubclassTest): """Test :class:`astropy.cosmology.LambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = LambdaCDM # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = get_redshift_methods(LambdaCDM, allow_private=True, allow_z2=False) - {"_dS_age"} # `_dS_age` is removed because it doesn't strictly rely on the value of `z`, # so any input that doesn't trip up ``np.shape`` is "valid" @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.LambdaCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, -1.0) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("LambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM): """Test :class:`astropy.cosmology.FlatLambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatLambdaCDM @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize('method', TestLambdaCDM._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # =============================================================== # Method & Attribute Tests def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("FlatLambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s)," " Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class Parameterw0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` w0 on a Cosmology. w0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
""" def test_w0(self, cosmo_cls, cosmo): """Test Parameter ``w0``.""" # on the class assert isinstance(cosmo_cls.w0, Parameter) assert "Dark energy equation of state" in cosmo_cls.w0.__doc__ assert cosmo_cls.w0.unit is None # on the instance assert cosmo.w0 is cosmo._w0 assert cosmo.w0 == self.cls_kwargs["w0"] def test_init_w0(self, cosmo_cls, ba): """Test initialization for values of ``w0``.""" # test that it works with units ba.arguments["w0"] = ba.arguments["w0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # also without units ba.arguments["w0"] = ba.arguments["w0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # must be dimensionless ba.arguments["w0"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class TestwCDM(FLRWSubclassTest, Parameterw0TestMixin): """Test :class:`astropy.cosmology.wCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wCDM self.cls_kwargs.update(w0=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1) assert c.w0 == 0.1 for n in (set(cosmo.__parameters__) - {"w0"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.wCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, self.cls_kwargs["w0"]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("wCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatwCDM(FlatFLRWMixinTest, TestwCDM): """Test :class:`astropy.cosmology.FlatwCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatwCDM self.cls_kwargs.update(w0=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("FlatwCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " w0=-0.5, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class ParameterwaTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wa on a Cosmology. wa is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_wa(self, cosmo_cls, cosmo): """Test Parameter ``wa``.""" # on the class assert isinstance(cosmo_cls.wa, Parameter) assert "Negative derivative" in cosmo_cls.wa.__doc__ assert cosmo_cls.wa.unit is None # on the instance assert cosmo.wa is cosmo._wa assert cosmo.wa == self.cls_kwargs["wa"] def test_init_wa(self, cosmo_cls, ba): """Test initialization for values of ``wa``.""" # test that it works with units ba.arguments["wa"] = ba.arguments["wa"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # also without units ba.arguments["wa"] = ba.arguments["wa"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # must be dimensionless ba.arguments["wa"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0waCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwaTestMixin): """Test :class:`astropy.cosmology.w0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wa=0.2) assert c.w0 == 0.1 assert c.wa == 0.2 for n in (set(cosmo.__parameters__) - {"w0", "wa"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0waCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -1.25) assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1, -1.16666667, -1.25, -1.3, -1.34848485]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("w0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class TestFlatw0waCDM(FlatFLRWMixinTest, Testw0waCDM): """Test :class:`astropy.cosmology.Flatw0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = Flatw0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("Flatw0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s)," " Om0=0.27, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class ParameterwpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wp on a Cosmology. wp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_wp(self, cosmo_cls, cosmo): """Test Parameter ``wp``.""" # on the class assert isinstance(cosmo_cls.wp, Parameter) assert "at the pivot" in cosmo_cls.wp.__doc__ assert cosmo_cls.wp.unit is None # on the instance assert cosmo.wp is cosmo._wp assert cosmo.wp == self.cls_kwargs["wp"] def test_init_wp(self, cosmo_cls, ba): """Test initialization for values of ``wp``.""" # test that it works with units ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # also without units ba.arguments["wp"] = ba.arguments["wp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # must be dimensionless ba.arguments["wp"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class ParameterzpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` zp on a Cosmology. zp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_zp(self, cosmo_cls, cosmo): """Test Parameter ``zp``.""" # on the class assert isinstance(cosmo_cls.zp, Parameter) assert "pivot redshift" in cosmo_cls.zp.__doc__ assert cosmo_cls.zp.unit == cu.redshift # on the instance assert cosmo.zp is cosmo._zp assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift def test_init_zp(self, cosmo_cls, ba): """Test initialization for values of ``zp``.""" # test that it works with units ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp == ba.arguments["zp"] # also without units ba.arguments["zp"] = ba.arguments["zp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp.value == ba.arguments["zp"] # must be dimensionless ba.arguments["zp"] = 10 * u.km with pytest.raises(u.UnitConversionError): cosmo_cls(*ba.args, **ba.kwargs) class TestwpwaCDM(FLRWSubclassTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin): """Test :class:`astropy.cosmology.wpwaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wpwaCDM self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(wp=0.1, wa=0.2, zp=14) assert c.wp == 0.1 assert c.wa == 0.2 assert c.zp == 14 for n in (set(cosmo.__parameters__) - {"wp", "wa", "zp"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.wpwaCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(0.5), -0.9) assert u.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), [-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("wpwaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K," " Neff=3.04, m_nu=[0. 0. 0.] 
eV, Ob0=0.03)") assert repr(cosmo) == expected # ----------------------------------------------------------------------------- class ParameterwzTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wz on a Cosmology. wz is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wz(self, cosmo_cls, cosmo): """Test Parameter ``wz``.""" # on the class assert isinstance(cosmo_cls.wz, Parameter) assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__ assert cosmo_cls.wz.unit is None # on the instance assert cosmo.wz is cosmo._wz assert cosmo.wz == self.cls_kwargs["wz"] def test_init_wz(self, cosmo_cls, ba): """Test initialization for values of ``wz``.""" # test that it works with units ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # also without units ba.arguments["wz"] = ba.arguments["wz"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # must be dimensionless ba.arguments["wz"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0wzCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwzTestMixin): """Test :class:`astropy.cosmology.w0wzCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0wzCDM self.cls_kwargs.update(w0=-1, wz=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wz=0.2) assert c.w0 == 0.1 assert c.wz == 0.2 for n in (set(cosmo.__parameters__) - {"w0", "wz"}): v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0wzCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -0.5) assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1.0, -0.75, -0.5, -0.25, 0.15]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ("w0wzCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27," " Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)") assert repr(cosmo) == expected
55a99faf675e56f65fafa645c08beb10135ec1f51dc28908155b7591fa77e82a
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from math import inf

import pytest
import numpy as np

from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.cosmology.utils import inf_like, vectorize_if_needed
from astropy.utils.exceptions import AstropyDeprecationWarning

from .test_core import _zarr, invalid_zs, valid_zs


def test_vectorize_redshift_method():
    """Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
    class Class:
        @vectorize_redshift_method
        def method(self, z):
            return z

    c = Class()

    assert hasattr(c.method, "__vectorized__")
    assert isinstance(c.method.__vectorized__, np.vectorize)

    # calling with Number
    assert c.method(1) == 1
    assert isinstance(c.method(1), int)

    # calling with a numpy scalar
    assert c.method(np.float64(1)) == np.float64(1)
    assert isinstance(c.method(np.float64(1)), np.float64)

    # numpy array
    assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
    assert isinstance(c.method(np.array([1, 2])), np.ndarray)

    # non-scalar
    assert all(c.method([1, 2]) == np.array([1, 2]))
    assert isinstance(c.method([1, 2]), np.ndarray)


def test_vectorize_if_needed():
    """
    Test :func:`astropy.cosmology.utils.vectorize_if_needed`.

    There's no need to test 'veckw' because that is directly passed to
    `numpy.vectorize` which thoroughly tests the various inputs.
    """
    func = lambda x: x ** 2

    with pytest.warns(AstropyDeprecationWarning):
        # not vectorized
        assert vectorize_if_needed(func, 2) == 4
        # vectorized
        assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])


@pytest.mark.parametrize("arr, expected",
                         [(0.0, inf),  # float scalar
                          (1, inf),  # integer scalar should give float output
                          ([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
                          ([0, 1, 2, 3], (inf, inf, inf, inf)),  # integer list
                          ])
def test_inf_like(arr, expected):
    """
    Test :func:`astropy.cosmology.utils.inf_like`.

    All inputs should give a float output.
    These tests are also in the docstring, but it's better to have them also
    in one consolidated location.
    """
    with pytest.warns(AstropyDeprecationWarning):
        assert np.all(inf_like(arr) == expected)


# -------------------------------------------------------------------


class Test_aszarr:

    @pytest.mark.parametrize("z, expect", list(zip(valid_zs, [
        0, 1, 1100, np.float64(3300), 2.0, 3.0, _zarr, _zarr, _zarr, _zarr
    ])))
    def test_valid(self, z, expect):
        """Test :func:`astropy.cosmology.utils.aszarr`."""
        got = aszarr(z)
        assert np.array_equal(got, expect)

    @pytest.mark.parametrize("z, exc", invalid_zs)
    def test_invalid(self, z, exc):
        """Test :func:`astropy.cosmology.utils.aszarr`."""
        with pytest.raises(exc):
            aszarr(z)
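# -------------------------------------------------------------------
# For orientation: a minimal sketch of the decorator behavior the tests above
# pin down (scalars pass straight through, array-likes go through a cached
# ``np.vectorize``). Illustrative only, not astropy's actual implementation.

import functools
from numbers import Number


def toy_vectorize_redshift_method(func):
    """Toy stand-in: vectorize ``func`` over its redshift argument."""
    vectorized = np.vectorize(func)

    @functools.wraps(func)
    def wrapper(self, z):
        if isinstance(z, (Number, np.generic)):  # scalars pass straight through
            return func(self, z)
        return vectorized(self, np.asarray(z))   # lists / arrays are vectorized

    wrapper.__vectorized__ = vectorized  # mirrors the attribute asserted above
    return wrapper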
a23140512ee2a9e5617ec6436bb4ebf92f0e6071b93b251f82d1a55294495282
import os
import sys

sys.path.insert(0, os.path.dirname(__file__))  # allows import of "mypackage"

# isort split

import mypackage
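# -----------------------------------------------------------------------------
# For context: the shim above makes a package that sits next to this file
# importable without installation. A hypothetical layout (names illustrative)
# that it assumes:
#
#   some_dir/
#   |-- conftest.py   <- the shim above; prepends its own directory to sys.path
#   `-- mypackage/
#       `-- __init__.py
#
# The "# isort split" directive keeps import sorters from hoisting
# ``import mypackage`` above the ``sys.path`` manipulation it depends on.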
6b478d6c99b8de9a293371986197ba74d4c4bbffa96fca329364431c113d72c3
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""Configure the tests for :mod:`astropy.cosmology`."""

##############################################################################
# IMPORTS

# STDLIB
import inspect

# THIRD-PARTY
import pytest

# LOCAL
from astropy.cosmology import core
from astropy.tests.helper import pickle_protocol  # noqa: F403

###############################################################################
# FUNCTIONS


def get_redshift_methods(cosmology, allow_private=True, allow_z2=True):
    """Get redshift methods from a cosmology.

    Parameters
    ----------
    cosmology : |Cosmology| class or instance

    Returns
    -------
    set[str]
    """
    methods = set()
    for n in dir(cosmology):
        try:  # get method, some will error on ABCs
            m = getattr(cosmology, n)
        except NotImplementedError:
            continue

        # Add anything callable, optionally excluding private methods.
        if callable(m) and (not n.startswith('_') or allow_private):
            methods.add(n)

    # Sieve out incompatible methods.
    # The index to check for redshift depends on whether cosmology is a class
    # or instance and does/doesn't include 'self'.
    iz1 = 1 if inspect.isclass(cosmology) else 0
    for n in tuple(methods):
        try:
            sig = inspect.signature(getattr(cosmology, n))
        except ValueError:  # Remove non-introspectable methods.
            methods.discard(n)
            continue
        else:
            params = list(sig.parameters.keys())

        # Remove non redshift methods:
        if len(params) <= iz1:  # Check there are enough arguments.
            methods.discard(n)
        elif len(params) >= iz1 + 1 and not params[iz1].startswith("z"):
            # First non-self arg is z.
            methods.discard(n)
        # If methods with 2 z args are not allowed, the following arg is checked.
        elif not allow_z2 and (len(params) >= iz1 + 2) and params[iz1 + 1].startswith("z"):
            methods.discard(n)

    return methods


###############################################################################
# FIXTURES


@pytest.fixture
def clean_registry():
    # TODO! with monkeypatch instead for thread safety.
    ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
    core._COSMOLOGY_CLASSES = {}  # set as empty dict

    yield core._COSMOLOGY_CLASSES

    core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES
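# -----------------------------------------------------------------------------
# Illustrative use of ``get_redshift_methods`` on a toy class. The class and
# method names below are made up purely for demonstration:


class _ToyCosmology:
    def w(self, z): ...
    def efunc(self, z): ...
    def _private(self, z): ...          # dropped when allow_private=False
    def age_at(self, t): ...            # first arg not named 'z*' -> dropped
    def de_density(self, z1, z2): ...   # second 'z' arg -> dropped if allow_z2=False


assert get_redshift_methods(_ToyCosmology, allow_private=False,
                            allow_z2=False) == {"w", "efunc"}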
cb099a40d6c36fe9a72d7fbcb6863f94d3aceeeea2b1a24a53da7a56c2e9f8a1
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.parameter`.""" ############################################################################## # IMPORTS # STDLIB import ast import inspect import sys # THIRD PARTY import pytest import numpy as np # LOCAL import astropy.units as u from astropy.cosmology import Cosmology from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.parameter import Parameter, _validate_to_float, _validate_with_unit ############################################################################## # TESTS ############################################################################## class ParameterTestMixin: """Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology. :class:`astropy.cosmology.Parameter` is a descriptor and this test suite tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`). """ @pytest.fixture def parameter(self, cosmo_cls): """Cosmological Parameters""" # I wish this would work # yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__} # just return one parameter at random yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop()) @pytest.fixture def all_parameter(self, cosmo_cls): """Cosmological All Parameter instances""" # I wish this would work # yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__} # just return one parameter at random yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop()) # =============================================================== # Method Tests def test_Parameter_class_attributes(self, all_parameter): """Test :class:`astropy.cosmology.Parameter` attributes on class.""" # _registry_validators assert hasattr(all_parameter, "_registry_validators") assert isinstance(all_parameter._registry_validators, dict) assert all(isinstance(k, str) for k in all_parameter._registry_validators.keys()) assert all(callable(v) for v in all_parameter._registry_validators.values()) def test_Parameter_init(self): """Test :class:`astropy.cosmology.Parameter` instantiation.""" # defaults parameter = Parameter() assert parameter.fvalidate is _validate_with_unit assert parameter.unit is None assert parameter.equivalencies == [] assert parameter.format_spec == "" assert parameter.derived is False assert parameter.name is None # setting all kwargs parameter = Parameter(fvalidate="float", doc="DOCSTRING", unit="km", equivalencies=[u.mass_energy()], fmt=".4f", derived=True) assert parameter.fvalidate is _validate_to_float assert parameter.unit is u.km assert parameter.equivalencies == [u.mass_energy()] assert parameter.format_spec == ".4f" assert parameter.derived is True def test_Parameter_instance_attributes(self, all_parameter): """Test :class:`astropy.cosmology.Parameter` attributes from init.""" assert hasattr(all_parameter, "fvalidate") assert callable(all_parameter.fvalidate) assert hasattr(all_parameter, "__doc__") # Parameter assert hasattr(all_parameter, "_unit") assert hasattr(all_parameter, "_equivalencies") assert hasattr(all_parameter, "_fmt") assert hasattr(all_parameter, "_derived") # __set_name__ assert hasattr(all_parameter, "_attr_name") assert hasattr(all_parameter, "_attr_name_private") def test_Parameter_fvalidate(self, all_parameter): """Test :attr:`astropy.cosmology.Parameter.fvalidate`.""" assert hasattr(all_parameter, "fvalidate") assert callable(all_parameter.fvalidate) def test_Parameter_name(self, all_parameter): 
"""Test :attr:`astropy.cosmology.Parameter.name`.""" assert hasattr(all_parameter, "name") assert isinstance(all_parameter.name, str) assert all_parameter.name is all_parameter._attr_name def test_Parameter_unit(self, all_parameter): """Test :attr:`astropy.cosmology.Parameter.unit`.""" assert hasattr(all_parameter, "unit") assert isinstance(all_parameter.unit, (u.UnitBase, type(None))) assert all_parameter.unit is all_parameter._unit def test_Parameter_equivalencies(self, all_parameter): """Test :attr:`astropy.cosmology.Parameter.equivalencies`.""" assert hasattr(all_parameter, "equivalencies") assert isinstance(all_parameter.equivalencies, (list, u.Equivalency)) assert all_parameter.equivalencies is all_parameter._equivalencies def test_Parameter_format_spec(self, all_parameter): """Test :attr:`astropy.cosmology.Parameter.format_spec`.""" assert hasattr(all_parameter, "format_spec") assert isinstance(all_parameter.format_spec, str) assert all_parameter.format_spec is all_parameter._fmt def test_Parameter_derived(self, cosmo_cls, all_parameter): """Test :attr:`astropy.cosmology.Parameter.derived`.""" assert hasattr(all_parameter, "derived") assert isinstance(all_parameter.derived, bool) assert all_parameter.derived is all_parameter._derived # test value if all_parameter.name in cosmo_cls.__parameters__: assert all_parameter.derived is False else: assert all_parameter.derived is True # ------------------------------------------- # descriptor methods def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter): """Test :attr:`astropy.cosmology.Parameter.__get__`.""" # from class parameter = getattr(cosmo_cls, all_parameter.name) assert isinstance(parameter, Parameter) assert parameter is all_parameter # from instance parameter = getattr(cosmo, all_parameter.name) assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private)) def test_Parameter_descriptor_set(self, cosmo, all_parameter): """Test :attr:`astropy.cosmology.Parameter.__set__`.""" # test it's already set assert hasattr(cosmo, all_parameter._attr_name_private) # and raises an error if set again with pytest.raises(AttributeError, match="can't set attribute"): setattr(cosmo, all_parameter._attr_name, None) # ------------------------------------------- # validate value # tested later. # =============================================================== # Usage Tests def test_Parameter_listed(self, cosmo_cls, all_parameter): """Test each `astropy.cosmology.Parameter` attached to Cosmology.""" # just double check that each entry is a Parameter assert isinstance(all_parameter, Parameter) # the reverse: check that if it is a Parameter, it's listed. 
        # note have to check the more inclusive ``__all_parameters__``
        assert all_parameter.name in cosmo_cls.__all_parameters__
        if not all_parameter.derived:
            assert all_parameter.name in cosmo_cls.__parameters__

    def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
        """Test `astropy.cosmology.Parameter`-related on Cosmology."""
        # establish has expected attribute
        assert hasattr(cosmo_cls, "__parameters__")
        assert hasattr(cosmo_cls, "__all_parameters__")

    def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
        """Cosmology Parameter not unique to class when subclass defined."""
        # define subclass to show param is same
        class ExampleBase(cosmo_cls):
            param = Parameter()

        class Example(ExampleBase):
            pass

        assert Example.param is ExampleBase.param
        assert Example.__parameters__ == ExampleBase.__parameters__

    def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
        """Test parameters are reordered."""
        class Example(cosmo_cls):
            param = Parameter()

            def __init__(self, param, *, name=None, meta=None):
                pass  # never actually initialized

        # param should be 1st, all other parameters next
        assert Example.__parameters__[0] == "param"
        # Check the other parameters are as expected.
        # only run this test if "param" is not already on the cosmology
        if cosmo_cls.__parameters__[0] != "param":
            assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)

    def test_make_from_Parameter(self, cosmo_cls, clean_registry):
        """Test the parameter creation process. Uses ``__set__``."""
        class Example(cosmo_cls):
            param = Parameter(unit=u.eV, equivalencies=u.mass_energy())

            def __init__(self, param, *, name=None, meta=None):
                self.param = param

            @property
            def is_flat(self):
                return super().is_flat()

        assert Example(1).param == 1 * u.eV
        assert Example(1 * u.eV).param == 1 * u.eV
        assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
        assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())


# ========================================================================


class TestParameter(ParameterTestMixin):
    """
    Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
    that wouldn't be covered by the per-cosmology tests.
    """

    def setup_class(self):
        class Example1(Cosmology):
            param = Parameter(doc="Description of example parameter.",
                              unit=u.m, equivalencies=u.mass_energy())

            def __init__(self, param=15):
                self.param = param

            @property
            def is_flat(self):
                return super().is_flat()

        # with validator
        class Example2(Example1):
            def __init__(self, param=15 * u.m):
                self.param = param

            @Example1.param.validator
            def param(self, param, value):
                return value.to(u.km)

        # attributes
        self.classes = {"Example1": Example1, "Example2": Example2}

    def teardown_class(self):
        for cls in self.classes.values():
            _COSMOLOGY_CLASSES.pop(cls.__qualname__)

    @pytest.fixture(scope="class", params=["Example1", "Example2"])
    def cosmo_cls(self, request):
        """Cosmology class."""
        return self.classes[request.param]

    @pytest.fixture(scope="class")
    def cosmo(self, cosmo_cls):
        """Cosmology instance"""
        return cosmo_cls()

    @pytest.fixture(scope="class")
    def param(self, cosmo_cls):
        """Get Parameter 'param' from cosmology class."""
        return cosmo_cls.param

    # ==============================================================

    def test_Parameter_instance_attributes(self, param):
        """Test :class:`astropy.cosmology.Parameter` attributes from init."""
        super().test_Parameter_instance_attributes(param)

        # property
        assert param.__doc__ == "Description of example parameter."
# custom from init assert param._unit == u.m assert param._equivalencies == u.mass_energy() assert param._fmt == "" assert param._derived == False # custom from set_name assert param._attr_name == "param" assert param._attr_name_private == "_param" def test_Parameter_fvalidate(self, cosmo, param): """Test :attr:`astropy.cosmology.Parameter.fvalidate`.""" super().test_Parameter_fvalidate(param) value = param.fvalidate(cosmo, param, 1000 * u.m) assert value == 1 * u.km def test_Parameter_name(self, param): """Test :attr:`astropy.cosmology.Parameter.name`.""" super().test_Parameter_name(param) assert param.name == "param" def test_Parameter_unit(self, param): """Test :attr:`astropy.cosmology.Parameter.unit`.""" super().test_Parameter_unit(param) assert param.unit == u.m def test_Parameter_equivalencies(self, param): """Test :attr:`astropy.cosmology.Parameter.equivalencies`.""" super().test_Parameter_equivalencies(param) assert param.equivalencies == u.mass_energy() def test_Parameter_format_spec(self, param): """Test :attr:`astropy.cosmology.Parameter.format_spec`.""" super().test_Parameter_format_spec(param) assert param.format_spec == "" def test_Parameter_derived(self, cosmo_cls, param): """Test :attr:`astropy.cosmology.Parameter.derived`.""" super().test_Parameter_derived(cosmo_cls, param) assert param.derived is False # ------------------------------------------- # descriptor methods def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param): """Test :meth:`astropy.cosmology.Parameter.__get__`.""" super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param) # from instance value = getattr(cosmo, param.name) assert value == 15 * u.m # ------------------------------------------- # validation def test_Parameter_validator(self, param): """Test :meth:`astropy.cosmology.Parameter.validator`.""" for k in Parameter._registry_validators: newparam = param.validator(k) assert newparam.fvalidate == newparam._registry_validators[k] # error for non-registered str with pytest.raises(ValueError, match="`fvalidate`, if str"): Parameter(fvalidate="NOT REGISTERED") # error if wrong type with pytest.raises(TypeError, match="`fvalidate` must be a function or"): Parameter(fvalidate=object()) def test_Parameter_validate(self, cosmo, param): """Test :meth:`astropy.cosmology.Parameter.validate`.""" value = param.validate(cosmo, 1000 * u.m) # whether has custom validator if param.fvalidate is param._registry_validators["default"]: assert value.unit == u.m assert value.value == 1000 else: assert value.unit == u.km assert value.value == 1 def test_Parameter_register_validator(self, param): """Test :meth:`astropy.cosmology.Parameter.register_validator`.""" # already registered with pytest.raises(KeyError, match="validator 'default' already"): param.__class__.register_validator("default", None) # validator not None try: func = lambda x: x validator = param.__class__.register_validator("newvalidator", func) assert validator is func finally: param.__class__._registry_validators.pop("newvalidator", None) # used as decorator try: @param.__class__.register_validator("newvalidator") def func(cosmology, param, value): return value assert param.__class__._registry_validators["newvalidator"] is func finally: param.__class__._registry_validators.pop("newvalidator", None) # ------------------------------------------- def test_Parameter_clone(self, param): """Test :meth:`astropy.cosmology.Parameter.clone`.""" # this implicitly relies on `__eq__` testing properly. Which is tested. 
# basic test that nothing changes assert param.clone() == param assert param.clone() is not param # but it's not a 'singleton' # passing kwargs will change stuff newparam = param.clone(unit="km/(yr sr)") assert newparam.unit == u.km / u.yr / u.sr assert param.unit != u.km / u.yr / u.sr # original is unchanged # expected failure for not-an-argument with pytest.raises(TypeError): param.clone(not_a_valid_parameter=True) # ------------------------------------------- def test_Parameter_equality(self): """ Test Parameter equality. Determined from the processed initialization args (including defaults). """ p1 = Parameter(unit="km / (s Mpc)") p2 = Parameter(unit="km / (s Mpc)") assert p1 == p2 # not equal parameters p3 = Parameter(unit="km / s") assert p3 != p1 # misc assert p1 != 2 # show doesn't error # ------------------------------------------- def test_Parameter_repr(self, cosmo_cls, param): """Test Parameter repr.""" r = repr(param) assert "Parameter(" in r for subs in ("derived=False", 'unit=Unit("m")', 'equivalencies=[(Unit("kg"), Unit("J")', "fmt=''", "doc='Description of example parameter.'"): assert subs in r, subs # `fvalidate` is a little tricker b/c one of them is custom! if param.fvalidate in param._registry_validators.values(): # not custom assert "fvalidate='default'" in r else: assert "fvalidate=<" in r # Some function, don't care about details. def test_Parameter_repr_roundtrip(self, param): """Test ``eval(repr(Parameter))`` can round trip to ``Parameter``.""" P = Parameter(doc="A description of this parameter.", derived=True) NP = eval(repr(P)) # Evaluate string representation back into a param. assert P == NP # ============================================================== def test_Parameter_doesnt_change_with_generic_class(self): """Descriptors are initialized once and not updated on subclasses.""" class ExampleBase: def __init__(self, param=15): self._param = param sig = inspect.signature(__init__) _init_signature = sig.replace(parameters=list(sig.parameters.values())[1:]) param = Parameter(doc="example parameter") class Example(ExampleBase): pass assert Example.param is ExampleBase.param def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls): """Cosmology reinitializes all descriptors when a subclass is defined.""" # define subclass to show param is same class Example(cosmo_cls): pass assert Example.param is cosmo_cls.param # unregister _COSMOLOGY_CLASSES.pop(Example.__qualname__) assert Example.__qualname__ not in _COSMOLOGY_CLASSES
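# -----------------------------------------------------------------------------
# The descriptor contract exercised above, condensed into a minimal sketch.
# This is NOT astropy's Parameter, just the bare pattern the tests rely on:
# a validator hook, ``__set_name__`` bookkeeping, and write-once ``__set__``.


class _MiniParameter:
    def __init__(self, fvalidate=None, doc=None):
        self.__doc__ = doc
        self._fvalidate = fvalidate or (lambda obj, param, value: value)

    def __set_name__(self, owner, name):
        self._attr_name = name                  # e.g. "param"
        self._attr_name_private = "_" + name    # e.g. "_param"

    def __get__(self, obj, objtype=None):
        if obj is None:  # class access returns the descriptor itself
            return self
        return getattr(obj, self._attr_name_private)

    def __set__(self, obj, value):
        if hasattr(obj, self._attr_name_private):  # write-once, as asserted above
            raise AttributeError("can't set attribute")
        setattr(obj, self._attr_name_private, self._fvalidate(obj, self, value))


class _Demo:
    param = _MiniParameter(fvalidate=lambda obj, param, value: 2 * value)

    def __init__(self, param=15):
        self.param = param


assert _Demo().param == 30                      # validated on first set
assert isinstance(_Demo.param, _MiniParameter)  # class access -> descriptor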
db0eec731304570c317d1e3efe84e8829736e37c57f025c1c1dbaf1a86f31921
# -*- coding: utf-8 -*- """Testing :mod:`astropy.cosmology.units`.""" ############################################################################## # IMPORTS import contextlib import pytest import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import Planck13, default_cosmology, flrw from astropy.tests.helper import assert_quantity_allclose from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY from astropy.utils.exceptions import AstropyDeprecationWarning ############################################################################## # TESTS ############################################################################## def test_has_expected_units(): """ Test that this module has the expected set of units. Some of the units are imported from :mod:`astropy.units`, or vice versa. Here we test presence, not usage. Units from :mod:`astropy.units` are tested in that module. Units defined in :mod:`astropy.cosmology` will be tested subsequently. """ with pytest.warns(AstropyDeprecationWarning, match="`littleh`"): assert u.astrophys.littleh is cu.littleh def test_has_expected_equivalencies(): """ Test that this module has the expected set of equivalencies. Many of the equivalencies are imported from :mod:`astropy.units`, so here we test presence, not usage. Equivalencies from :mod:`astropy.units` are tested in that module. Equivalencies defined in :mod:`astropy.cosmology` will be tested subsequently. """ with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"): assert u.equivalencies.with_H0 is cu.with_H0 def test_littleh(): """Test :func:`astropy.cosmology.units.with_H0`.""" H0_70 = 70 * u.km / u.s / u.Mpc h70dist = 70 * u.Mpc / cu.littleh assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc) # make sure using the default cosmology works cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc) # Now try a luminosity scaling h1lum = 0.49 * u.Lsun * cu.littleh ** -2 assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun) # And the trickiest one: magnitudes. Using H0=10 here for the round numbers H0_10 = 10 * u.km / u.s / u.Mpc # assume the "true" magnitude M = 12. 
# Then M - 5*log_10(h) = M + 5 = 17 withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2)) assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_dimensionless_redshift(): """Test :func:`astropy.cosmology.units.dimensionless_redshift`.""" z = 3 * cu.redshift val = 3 * u.one # show units not equal assert z.unit == cu.redshift assert z.unit != u.one # test equivalency enabled by default assert z == val # also test that it works for powers assert (3 * cu.redshift ** 3) == val # and in composite units assert (3 * u.km / cu.redshift ** 3) == 3 * u.km # test it also works as an equivalency with u.set_enabled_equivalencies([]): # turn off default equivalencies assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val with pytest.raises(ValueError): z.to(u.one) # if this fails, something is really wrong with u.add_enabled_equivalencies(cu.dimensionless_redshift()): assert z == val @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_redshift_temperature(): """Test :func:`astropy.cosmology.units.redshift_temperature`.""" cosmo = Planck13.clone(Tcmb0=3 * u.K) default_cosmo = default_cosmology.get() z = 15 * cu.redshift Tcmb = cosmo.Tcmb(z) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.redshift_temperature() assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.redshift_temperature() assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z)) assert default_cosmo.Tcmb(z) != Tcmb # 2) Specifying the cosmology equivalency = cu.redshift_temperature(cosmo) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # Test `atzkw` equivalency = cu.redshift_temperature(cosmo, ztol=1e-10) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_redshift_hubble(): """Test :func:`astropy.cosmology.units.redshift_hubble`.""" unit = u.km / u.s / u.Mpc cosmo = Planck13.clone(H0=100 * unit) default_cosmo = default_cosmology.get() z = 15 * cu.redshift H = cosmo.H(z) h = H.to_value(u.km/u.s/u.Mpc) / 100 * cu.littleh # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.redshift_hubble() # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.redshift_hubble() assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z)) assert default_cosmo.H(z) != H # 2) Specifying the cosmology equivalency = cu.redshift_hubble(cosmo) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # Test `atzkw` equivalency = cu.redshift_hubble(cosmo, ztol=1e-10) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # 
little-h @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") @pytest.mark.parametrize( "kind", [cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"] ) def test_redshift_distance(kind): """Test :func:`astropy.cosmology.units.redshift_distance`.""" z = 15 * cu.redshift d = getattr(Planck13, kind + "_distance")(z) equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind) # properties of Equivalency assert equivalency.name[0] == "redshift_distance" assert equivalency.kwargs[0]["cosmology"] == Planck13 assert equivalency.kwargs[0]["distance"] == kind # roundtrip assert_quantity_allclose(z.to(u.Mpc, equivalency), d) assert_quantity_allclose(d.to(cu.redshift, equivalency), z) def test_redshift_distance_wrong_kind(): """Test :func:`astropy.cosmology.units.redshift_distance` wrong kind.""" with pytest.raises(ValueError, match="`kind`"): cu.redshift_distance(kind=None) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") class Test_with_redshift: """Test `astropy.cosmology.units.with_redshift`.""" @pytest.fixture(scope="class") def cosmo(self): """Test cosmology.""" return Planck13.clone(Tcmb0=3 * u.K) # =========================================== def test_cosmo_different(self, cosmo): """The default is different than the test cosmology.""" default_cosmo = default_cosmology.get() assert default_cosmo != cosmo # shows changing default def test_no_equivalency(self, cosmo): """Test the equivalency ``with_redshift`` without any enabled.""" equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False) assert len(equivalency) == 0 # ------------------------------------------- def test_temperature_off(self, cosmo): """Test ``with_redshift`` with the temperature off.""" default_cosmo = default_cosmology.get() z = 15 * cu.redshift Tcmb = cosmo.Tcmb(z) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(Tcmb=False) with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"): z.to(u.K, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, Tcmb=False) with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"): z.to(u.K, equivalency) def test_temperature(self, cosmo): """Test temperature equivalency component.""" default_cosmo = default_cosmology.get() z = 15 * cu.redshift Tcmb = cosmo.Tcmb(z) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.with_redshift(Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z)) assert default_cosmo.Tcmb(z) != Tcmb # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # Test `atzkw` # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10}) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # ------------------------------------------- def test_hubble_off(self, cosmo): """Test ``with_redshift`` with Hubble off.""" unit = u.km / u.s / u.Mpc default_cosmo = default_cosmology.get() z = 15 * cu.redshift H = cosmo.H(z) # 1) Default (without specifying the 
cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(hubble=False) with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "): z.to(unit, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, hubble=False) with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "): z.to(unit, equivalency) def test_hubble(self, cosmo): """Test Hubble equivalency component.""" unit = u.km/u.s/u.Mpc default_cosmo = default_cosmology.get() z = 15 * cu.redshift H = cosmo.H(z) h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(hubble=True) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.with_redshift(hubble=True) assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z)) assert default_cosmo.H(z) != H # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, hubble=True) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # Test `atzkw` # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10}) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h # ------------------------------------------- def test_distance_off(self, cosmo): """Test ``with_redshift`` with the distance off.""" default_cosmo = default_cosmology.get() z = 15 * cu.redshift # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(distance=None) with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"): z.to(u.Mpc, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, distance=None) with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"): z.to(u.Mpc, equivalency) def test_distance_default(self): """Test distance equivalency default.""" z = 15 * cu.redshift d = default_cosmology.get().comoving_distance(z) equivalency = cu.with_redshift() assert_quantity_allclose(z.to(u.Mpc, equivalency), d) assert_quantity_allclose(d.to(cu.redshift, equivalency), z) def test_distance_wrong_kind(self): """Test distance equivalency, but the wrong kind.""" with pytest.raises(ValueError, match="`kind`"): cu.with_redshift(distance=ValueError) @pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"]) def test_distance(self, kind): """Test distance equivalency.""" cosmo = Planck13 z = 15 * cu.redshift dist = getattr(cosmo, kind + "_distance")(z) default_cosmo = default_cosmology.get() assert default_cosmo != cosmo # shows changing default # 1) without specifying the cosmology with default_cosmology.set(cosmo): equivalency = cu.with_redshift(distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), dist) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.with_redshift(distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), 
getattr(default_cosmo, kind + "_distance")(z)) assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), dist) assert_quantity_allclose(dist.to(cu.redshift, equivalency), z) # Test atzkw # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10}) assert_quantity_allclose(dist.to(cu.redshift, equivalency), z) # FIXME! get "dimensionless_redshift", "with_redshift" to work in this # they are not in ``astropy.units.equivalencies``, so the following fails @pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF") @pytest.mark.parametrize("equiv", [cu.with_H0]) def test_equivalencies_asdf(tmpdir, equiv, recwarn): from asdf.tests import helpers tree = {"equiv": equiv()} helpers.assert_roundtrip_tree(tree, tmpdir) def test_equivalency_context_manager(): base_registry = u.get_current_unit_registry() # check starting with only the dimensionless_redshift equivalency. assert len(base_registry.equivalencies) == 1 assert str(base_registry.equivalencies[0][0]) == "redshift"
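# -----------------------------------------------------------------------------
# Back-of-envelope check of the littleh numbers asserted above, using plain
# floats (the tests themselves go through ``cu.with_H0``):

import math

h70 = 70 / 100                           # littleh for H0 = 70 km/s/Mpc
assert math.isclose(70 / h70, 100)       # 70 Mpc/littleh -> 100 Mpc
assert math.isclose(0.49 / h70**2, 1.0)  # 0.49 Lsun littleh**-2 -> 1 Lsun

h10 = 10 / 100                           # littleh for H0 = 10 km/s/Mpc
# magnitudes: M_true = M_littleh + 5*log10(h), i.e. 17 - 5 = 12
assert math.isclose(17 + 5 * math.log10(h10), 12)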
a26e4c1b783546b3ea7ab7e6434d2f7b5ae14df12f5e4c564038df1fb43f9a1c
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import inspect import os import pytest from astropy import cosmology from astropy.cosmology import Cosmology, w0wzCDM from astropy.cosmology.connect import CosmologyRead, readwrite_registry from astropy.cosmology.core import Cosmology from astropy.cosmology.io.tests import (test_cosmology, test_ecsv, test_json, test_mapping, test_model, test_row, test_table, test_yaml) from astropy.table import QTable, Row ############################################################################### # SETUP cosmo_instances = cosmology.realizations.available # Collect the registered read/write formats. readwrite_formats = {"ascii.ecsv", "json"} # Collect all the registered to/from formats. Unfortunately this is NOT # automatic since the output format class is not stored on the registry. # (format, data type) tofrom_formats = [("mapping", dict), ("yaml", str), ("astropy.cosmology", Cosmology), ("astropy.row", Row), ("astropy.table", QTable)] ############################################################################### class ReadWriteTestMixin(test_ecsv.ReadWriteECSVTestMixin, test_json.ReadWriteJSONTestMixin): """ Tests for a CosmologyRead/Write on a |Cosmology|. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples. """ @pytest.mark.parametrize("format", readwrite_formats) def test_readwrite_complete_info(self, cosmo, tmpdir, format): """ Test writing from an instance and reading from the base class. This requires full information. The round-tripped metadata can be in a different order, so the OrderedDict must be converted to a dict before testing equality. """ fname = str(tmpdir / f"{cosmo.name}.{format}") cosmo.write(fname, format=format) # Also test kwarg "overwrite" assert os.path.exists(fname) # file exists with pytest.raises(IOError): cosmo.write(fname, format=format, overwrite=False) assert os.path.exists(fname) # overwrite file existing file cosmo.write(fname, format=format, overwrite=True) # Read back got = Cosmology.read(fname, format=format) assert got == cosmo assert dict(got.meta) == dict(cosmo.meta) @pytest.mark.parametrize("format", readwrite_formats) def test_readwrite_from_subclass_complete_info(self, cosmo_cls, cosmo, tmpdir, format): """ Test writing from an instance and reading from that class, when there's full information saved. """ fname = str(tmpdir / f"{cosmo.name}.{format}") cosmo.write(fname, format=format) # read with the same class that wrote. 
got = cosmo_cls.read(fname, format=format) assert got == cosmo assert got.meta == cosmo.meta # this should be equivalent to got = Cosmology.read(fname, format=format, cosmology=cosmo_cls) assert got == cosmo assert got.meta == cosmo.meta # and also got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__) assert got == cosmo assert got.meta == cosmo.meta class TestCosmologyReadWrite(ReadWriteTestMixin): """Test the classes CosmologyRead/Write.""" @pytest.fixture(scope="class", params=cosmo_instances) def cosmo(self, request): return getattr(cosmology.realizations, request.param) @pytest.fixture(scope="class") def cosmo_cls(self, cosmo): return cosmo.__class__ # ============================================================== @pytest.mark.parametrize("format", readwrite_formats) def test_write_methods_have_explicit_kwarg_overwrite(self, format): writer = readwrite_registry.get_writer(format, Cosmology) # test in signature sig = inspect.signature(writer) assert "overwrite" in sig.parameters # also in docstring assert "overwrite : bool" in writer.__doc__ @pytest.mark.parametrize("format", readwrite_formats) def test_readwrite_reader_class_mismatch(self, cosmo, tmpdir, format): """Test when the reader class doesn't match the file.""" fname = tmpdir / f"{cosmo.name}.{format}" cosmo.write(str(fname), format=format) # class mismatch # when reading directly with pytest.raises(TypeError, match="missing 1 required"): w0wzCDM.read(fname, format=format) with pytest.raises(TypeError, match="missing 1 required"): Cosmology.read(fname, format=format, cosmology=w0wzCDM) # when specifying the class with pytest.raises(ValueError, match="`cosmology` must be either"): w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM") ############################################################################### # To/From_Format Tests class ToFromFormatTestMixin(test_cosmology.ToFromCosmologyTestMixin, test_mapping.ToFromMappingTestMixin, test_model.ToFromModelTestMixin, test_row.ToFromRowTestMixin, test_table.ToFromTableTestMixin, test_yaml.ToFromYAMLTestMixin): """ Tests for a Cosmology[To/From]Format on a |Cosmology|. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ @pytest.mark.parametrize("format, totype", tofrom_formats) def test_tofromformat_complete_info(self, cosmo, format, totype, xfail_if_not_registered_with_yaml): """Read tests happen later.""" # test to_format obj = cosmo.to_format(format) assert isinstance(obj, totype) # test from_format got = Cosmology.from_format(obj, format=format) # Test autodetect, if enabled if self.can_autodentify(format): got2 = Cosmology.from_format(obj) assert got2 == got # internal consistency assert got == cosmo # external consistency assert got.meta == cosmo.meta @pytest.mark.parametrize("format, totype", tofrom_formats) def test_fromformat_subclass_complete_info(self, cosmo_cls, cosmo, format, totype, xfail_if_not_registered_with_yaml): """ Test transforming an instance and parsing from that class, when there's full information available. Partial information tests are handled in the Mixin super classes. """ # test to_format obj = cosmo.to_format(format) assert isinstance(obj, totype) # read with the same class that wrote. 
got = cosmo_cls.from_format(obj, format=format) if self.can_autodentify(format): got2 = Cosmology.from_format(obj) # and autodetect assert got2 == got # internal consistency assert got == cosmo # external consistency assert got.meta == cosmo.meta # this should be equivalent to got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls) assert got == cosmo assert got.meta == cosmo.meta # and also got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls.__qualname__) assert got == cosmo assert got.meta == cosmo.meta class TestCosmologyToFromFormat(ToFromFormatTestMixin): """Test Cosmology[To/From]Format classes.""" @pytest.fixture(scope="class", params=cosmo_instances) def cosmo(self, request): return getattr(cosmology.realizations, request.param) @pytest.fixture(scope="class") def cosmo_cls(self, cosmo): return cosmo.__class__ # ============================================================== @pytest.mark.parametrize("format_type", tofrom_formats) def test_fromformat_class_mismatch(self, cosmo, format_type): format, totype = format_type # test to_format obj = cosmo.to_format(format) assert isinstance(obj, totype) # class mismatch with pytest.raises(TypeError): w0wzCDM.from_format(obj, format=format) with pytest.raises(TypeError): Cosmology.from_format(obj, format=format, cosmology=w0wzCDM) # when specifying the class with pytest.raises(ValueError, match="`cosmology` must be either"): w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")
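# -----------------------------------------------------------------------------
# A hedged usage sketch of the round trips being tested (the file name is
# illustrative; assumes astropy >= 5.0, where Cosmology I/O is available):

from astropy.cosmology import Planck18

# write/read round trip through a registered file format
Planck18.write("planck18.ecsv", format="ascii.ecsv", overwrite=True)
again = Cosmology.read("planck18.ecsv", format="ascii.ecsv")
assert again == Planck18

# to/from_format round trip through an in-memory mapping
mapping = Planck18.to_format("mapping")
assert Cosmology.from_format(mapping, format="mapping") == Planck18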
e8bfac1d907a80c3e55ec883ed929c664f143ae084754b152abf485bc747a02e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.core`.""" ############################################################################## # IMPORTS # STDLIB import abc import inspect import pickle from types import MappingProxyType # THIRD PARTY import pytest import numpy as np # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import Cosmology, core from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.parameter import Parameter from astropy.table import Column, QTable, Table from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.utils.metadata import MetaData from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin from .test_parameter import ParameterTestMixin ############################################################################## # SETUP / TEARDOWN scalar_zs = [ 0, 1, 1100, # interesting times # FIXME! np.inf breaks some funcs. 0 * inf is an error np.float64(3300), # different type 2 * cu.redshift, 3 * u.one # compatible units ] _zarr = np.linspace(0, 1e5, num=20) array_zs = [ _zarr, # numpy _zarr.tolist(), # pure python Column(_zarr), # table-like _zarr * cu.redshift # Quantity ] valid_zs = scalar_zs + array_zs invalid_zs = [ (None, TypeError), # wrong type # Wrong units (the TypeError is for the cython, which can differ) (4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar ([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array ] class SubCosmology(Cosmology): """Defined here to be serializable.""" H0 = Parameter(unit="km/(s Mpc)") Tcmb0 = Parameter(unit=u.K) m_nu = Parameter(unit=u.eV) def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None): super().__init__(name=name, meta=meta) self.H0 = H0 self.Tcmb0 = Tcmb0 self.m_nu = m_nu @property def is_flat(self): return super().is_flat() ############################################################################## # TESTS ############################################################################## class MetaTestMixin: """Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology.""" def test_meta_on_class(self, cosmo_cls): assert isinstance(cosmo_cls.meta, MetaData) def test_meta_on_instance(self, cosmo): assert isinstance(cosmo.meta, dict) # test type # value set at initialization assert cosmo.meta == self.cls_kwargs.get("meta", {}) def test_meta_mutable(self, cosmo): """The metadata is NOT immutable on a cosmology""" key = tuple(cosmo.meta.keys())[0] # select some key cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable class TestCosmology(ParameterTestMixin, MetaTestMixin, ReadWriteTestMixin, ToFromFormatTestMixin, metaclass=abc.ABCMeta): """Test :class:`astropy.cosmology.Cosmology`. Subclasses should define tests for: - ``test_clone_change_param()`` - ``test_repr()`` """ def setup_class(self): """ Setup for testing. Cosmology should not be instantiated, so tests are done on a subclass. 
""" # make sure SubCosmology is known _COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology self.cls = SubCosmology self._cls_args = dict(H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV) self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"}) def teardown_class(self): _COSMOLOGY_CLASSES.pop("SubCosmology", None) @property def cls_args(self): return tuple(self._cls_args.values()) @pytest.fixture(scope="class") def cosmo_cls(self): """The Cosmology class as a :func:`pytest.fixture`.""" return self.cls @pytest.fixture(scope="function") # ensure not cached. def ba(self): """Return filled `inspect.BoundArguments` for cosmology.""" ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs) ba.apply_defaults() return ba @pytest.fixture(scope="class") def cosmo(self, cosmo_cls): """The cosmology instance with which to test.""" ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs) ba.apply_defaults() return cosmo_cls(*ba.args, **ba.kwargs) # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # class-level def test_init_subclass(self, cosmo_cls): """Test creating subclasses registers classes and manages Parameters.""" class InitSubclassTest(cosmo_cls): pass # test parameters assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__ # test and cleanup registry registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__) assert registrant is InitSubclassTest def test_init_signature(self, cosmo_cls, cosmo): """Test class-property ``_init_signature``.""" # test presence assert hasattr(cosmo_cls, "_init_signature") assert hasattr(cosmo, "_init_signature") # test internal consistency, so following tests can use either cls or instance. assert cosmo_cls._init_signature == cosmo._init_signature # test matches __init__, but without 'self' sig = inspect.signature(cosmo.__init__) # (instances don't have self) assert set(sig.parameters.keys()) == set(cosmo._init_signature.parameters.keys()) assert all(np.all(sig.parameters[k].default == p.default) for k, p in cosmo._init_signature.parameters.items()) # --------------------------------------------------------------- # instance-level def test_init(self, cosmo_cls): """Test initialization.""" # Cosmology only does name and meta, but this subclass adds H0 & Tcmb0. cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1}) assert cosmo.name == "test_init" assert cosmo.meta["m"] == 1 # if meta is None, it is changed to a dict cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None) assert cosmo.meta == {} def test_name(self, cosmo): """Test property ``name``.""" assert cosmo.name is cosmo._name # accesses private attribute assert cosmo.name is None or isinstance(cosmo.name, str) # type assert cosmo.name == self.cls_kwargs["name"] # test has expected value # immutable with pytest.raises(AttributeError, match="can't set"): cosmo.name = None def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``. It's an ABC.""" with pytest.raises(NotImplementedError, match="is_flat is not implemented"): cosmo.is_flat # ------------------------------------------------ # clone def test_clone_identical(self, cosmo): """Test method ``.clone()`` if no (kw)args.""" assert cosmo.clone() is cosmo def test_clone_name(self, cosmo): """Test method ``.clone()`` name argument.""" # test changing name. 
clone treats 'name' differently (see next test)
        c = cosmo.clone(name="cloned cosmo")
        assert c.name == "cloned cosmo"  # changed
        # show name is the only thing changed
        c._name = cosmo.name  # first change name back
        assert c == cosmo
        assert c.meta == cosmo.meta

        # now change a different parameter and see how 'name' changes
        c = cosmo.clone(meta={})
        assert c.name == cosmo.name + " (modified)"

    def test_clone_meta(self, cosmo):
        """Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
        # start with no change
        c = cosmo.clone(meta=None)
        assert c.meta == cosmo.meta

        # add something
        c = cosmo.clone(meta=dict(test_clone_meta=True))
        assert c.meta["test_clone_meta"] is True
        c.meta.pop("test_clone_meta")  # remove from meta
        assert c.meta == cosmo.meta  # now they match

    def test_clone_change_param(self, cosmo):
        """
        Test method ``.clone()`` changing a (or many) Parameter(s).
        Nothing here b/c no Parameters.
        """
        pass

    def test_clone_fail_unexpected_arg(self, cosmo):
        """Test when ``.clone()`` gets an unexpected argument."""
        with pytest.raises(TypeError, match="unexpected keyword argument"):
            cosmo.clone(not_an_arg=4)

    def test_clone_fail_positional_arg(self, cosmo):
        with pytest.raises(TypeError, match="1 positional argument"):
            cosmo.clone(None)

    # ---------------------------------------------------------------
    # comparison methods

    def test_is_equivalent(self, cosmo):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
        # to self
        assert cosmo.is_equivalent(cosmo)

        # same class, different instance
        newclone = cosmo.clone(name="test_is_equivalent")
        assert cosmo.is_equivalent(newclone)
        assert newclone.is_equivalent(cosmo)

        # different class and not convertible to Cosmology.
        assert not cosmo.is_equivalent(2)

    def test_equality(self, cosmo):
        """Test method ``.__eq__()``."""
        # wrong class
        assert (cosmo != 2) and (2 != cosmo)
        # correct
        assert cosmo == cosmo
        # different name => not equal, but equivalent
        newcosmo = cosmo.clone(name="test_equality")
        assert (cosmo != newcosmo) and (newcosmo != cosmo)
        assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)

    # ---------------------------------------------------------------

    def test_repr(self, cosmo_cls, cosmo):
        """Test method ``.__repr__()``.

        This is a very general test and it is probably good to have a
        hard-coded comparison.
""" r = repr(cosmo) # class in string rep assert cosmo_cls.__qualname__ in r assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing r = r[len(cosmo_cls.__qualname__) + 1:] # remove # name in string rep if cosmo.name is not None: assert f"name=\"{cosmo.name}\"" in r assert r.index("name=") == 0 r = r[6 + len(cosmo.name) + 3:] # remove # parameters in string rep ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__} cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__} for k, v in ps.items(): sv = format(v, cps[k].format_spec if v is not None else '') assert (k + '=' + sv) in r assert r.index(k) == 0 r = r[len((k + '=' + sv)) + 2:] # remove # ------------------------------------------------ @pytest.mark.parametrize("in_meta", [True, False]) @pytest.mark.parametrize("table_cls", [Table, QTable]) def test_astropy_table(self, cosmo, table_cls, in_meta): """Test ``astropy.table.Table(cosmology)``.""" tbl = table_cls(cosmo, cosmology_in_meta=in_meta) assert isinstance(tbl, table_cls) # the name & all parameters are columns for n in ("name", *cosmo.__parameters__): assert n in tbl.colnames assert np.all(tbl[n] == getattr(cosmo, n)) # check if Cosmology is in metadata or a column if in_meta: assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__ assert "cosmology" not in tbl.colnames else: assert "cosmology" not in tbl.meta assert tbl["cosmology"][0] == cosmo.__class__.__qualname__ # the metadata is transferred for k, v in cosmo.meta.items(): assert np.all(tbl.meta[k] == v) # =============================================================== # Usage Tests def test_immutability(self, cosmo): """ Test immutability of cosmologies. The metadata is mutable: see ``test_meta_mutable``. """ for n in cosmo.__all_parameters__: with pytest.raises(AttributeError): setattr(cosmo, n, getattr(cosmo, n)) def test_pickle_class(self, cosmo_cls, pickle_protocol): """Test classes can pickle and unpickle.""" # pickle and unpickle f = pickle.dumps(cosmo_cls, protocol=pickle_protocol) unpickled = pickle.loads(f) # test equality assert unpickled == cosmo_cls def test_pickle_instance(self, cosmo, pickle_protocol): """Test instances can pickle and unpickle.""" # pickle and unpickle f = pickle.dumps(cosmo, protocol=pickle_protocol) with u.add_enabled_units(cu): unpickled = pickle.loads(f) assert unpickled == cosmo assert unpickled.meta == cosmo.meta class CosmologySubclassTest(TestCosmology): """ Test subclasses of :class:`astropy.cosmology.Cosmology`. This is broken away from ``TestCosmology``, because |Cosmology| is/will be an ABC and subclasses must override some methods. """ @abc.abstractmethod def setup_class(self): """Setup for testing.""" pass # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # instance-level @abc.abstractmethod def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" # ----------------------------------------------------------------------------- class FlatCosmologyMixinTest: """Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses. E.g to use this class:: class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology): ... """ def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" super().test_is_flat(cosmo_cls, cosmo) # it's always True assert cosmo.is_flat is True def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`. 
Normally this would pass up via super(), but ``__equiv__`` is meant to be overridden, so we skip super(). e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology """ CosmologySubclassTest.test_is_equivalent(self, cosmo) # ----------------------------------------------------------------------------- def test_flrw_moved_deprecation(): """Test the deprecation warning about the move of FLRW classes.""" from astropy.cosmology import flrw # it's deprecated to import `flrw/*` from `core.py` with pytest.warns(AstropyDeprecationWarning): from astropy.cosmology.core import FLRW # but they are the same object assert FLRW is flrw.FLRW
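
# Illustrative sketch (editorial addition, not collected by pytest): the
# ``clone`` semantics exercised above, shown on the serializable
# ``SubCosmology`` defined in this module.
def _demo_clone_semantics():
    cosmo = SubCosmology(H0=70 * (u.km / u.s / u.Mpc), name="demo")
    # cloning with no arguments returns the selfsame instance
    assert cosmo.clone() is cosmo
    # changing anything but the name appends " (modified)" to the name
    clone = cosmo.clone(meta={"note": "changed"})
    assert clone.name == "demo (modified)"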
ac4e7e7d6d97d23121bf59cbb622d15222b642ce1664e0fe5fb871077a5a64e5
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB from types import MappingProxyType # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import parameters, realizations def test_realizations_in_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`.""" d = dir(parameters) assert set(d) == set(parameters.__all__) for n in parameters.available: assert n in d @pytest.mark.parametrize("name", parameters.available) def test_getting_parameters(name): """ Test getting 'parameters' and that it is derived from the corresponding realization. """ params = getattr(parameters, name) assert isinstance(params, MappingProxyType) assert params["name"] == name # Check parameters have the right keys and values cosmo = getattr(realizations, name) assert params["name"] == cosmo.name assert params["cosmology"] == cosmo.__class__.__qualname__ # All the cosmology parameters are equal for n in cosmo.__parameters__: assert np.array_equal(params[n], getattr(cosmo, n)) # All the metadata is included. Parameter values take precedence, so only # checking the keys. assert set(cosmo.meta.keys()).issubset(params.keys()) # Lastly, check the generation process. m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True) assert params == m
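
# Illustrative sketch (editorial addition, not collected by pytest): the
# mapping relation tested above, spelled out for a single realization.
def _demo_parameter_mapping():
    params = parameters.Planck18  # read-only MappingProxyType
    cosmo = realizations.Planck18
    assert params["name"] == cosmo.name == "Planck18"
    assert params["H0"] == cosmo.H0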
8dc8430fcc4ac60343a7ac71cce4aa79ec37f23e3b6d9e70df443028e3247d9a
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Stand-alone overall systems tests for :mod:`astropy.cosmology`.""" from io import StringIO import pytest import numpy as np import astropy.constants as const import astropy.units as u from astropy.cosmology import Cosmology, flrw, funcs from astropy.cosmology.realizations import Planck18 from astropy.units import allclose from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.") def test_flat_z1(): """Test a flat cosmology at z=1 against several other on-line calculators. Test values were taken from the following web cosmology calculators on 2012-02-11: Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W) Kempner: http://www.kempner.net/cosmic.php iCosmos: http://www.icosmos.co.uk/index.html """ cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) # The order of values below is Wright, Kempner, iCosmos' assert allclose(cosmo.comoving_distance(1), [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4) assert allclose(cosmo.angular_diameter_distance(1), [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4) assert allclose(cosmo.luminosity_distance(1), [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4) assert allclose(cosmo.lookback_time(1), [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3) assert allclose(cosmo.lookback_distance(1), [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.") def test_varyde_lumdist_mathematica(): """Tests a few varying dark energy EOS models against a Mathematica computation.""" z = np.array([0.2, 0.4, 0.9, 1.2]) # w0wa models cosmo = flrw.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0) assert allclose(cosmo.luminosity_distance(z), [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4) assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5) assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]), [1.0, 0.9246310669529021, 0.9184087000251957]) cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0) assert allclose(cosmo.luminosity_distance(z), [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4) cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0) assert allclose(cosmo.luminosity_distance(z), [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4) # wpwa models cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0) assert allclose(cosmo.luminosity_distance(z), [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4) cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0) assert allclose(cosmo.luminosity_distance(z), [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4) ############################################################################### # TODO! sort and refactor following tests. # overall systems tests stay here, specific tests go to new test suite. 
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
    """ Test if the right units are being returned"""
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
    assert cosmo.comoving_distance(1.0).unit == u.Mpc
    assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
    assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
    assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.luminosity_distance(1.0).unit == u.Mpc
    assert cosmo.lookback_time(1.0).unit == u.Gyr
    assert cosmo.lookback_distance(1.0).unit == u.Mpc
    assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
    assert cosmo.Tcmb(1.0).unit == u.K
    assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
    assert cosmo.Tnu(1.0).unit == u.K
    assert cosmo.Tnu([0.0, 1.0]).unit == u.K
    assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
    assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
    assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
    assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
    assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
    assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
    assert cosmo.age(1.0).unit == u.Gyr
    assert cosmo.distmod(1.0).unit == u.mag


@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
    """ Test array shape broadcasting for functions with single
    redshift inputs"""
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27,
                               m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
    z = np.linspace(0.1, 1, 6)
    z_reshape2d = z.reshape(2, 3)
    z_reshape3d = z.reshape(3, 2, 1)
    # Things with units
    methods = ['comoving_distance', 'luminosity_distance',
               'comoving_transverse_distance', 'angular_diameter_distance',
               'distmod', 'lookback_time', 'age', 'comoving_volume',
               'differential_comoving_volume', 'kpc_comoving_per_arcmin']
    for method in methods:
        g = getattr(cosmo, method)
        value_flat = g(z)
        assert value_flat.shape == z.shape
        value_2d = g(z_reshape2d)
        assert value_2d.shape == z_reshape2d.shape
        value_3d = g(z_reshape3d)
        assert value_3d.shape == z_reshape3d.shape
        assert value_flat.unit == value_2d.unit
        assert value_flat.unit == value_3d.unit
        assert allclose(value_flat, value_2d.flatten())
        assert allclose(value_flat, value_3d.flatten())

    # Also test unitless ones
    methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
               'w', 'de_density_scale', 'Onu', 'Ogamma',
               'nu_relative_density']
    for method in methods:
        g = getattr(cosmo, method)
        value_flat = g(z)
        assert value_flat.shape == z.shape
        value_2d = g(z_reshape2d)
        assert value_2d.shape == z_reshape2d.shape
        value_3d = g(z_reshape3d)
        assert value_3d.shape == z_reshape3d.shape
        assert allclose(value_flat, value_2d.flatten())
        assert allclose(value_flat, value_3d.flatten())

    # Test some dark energy models
    methods = ['Om', 'Ode', 'w', 'de_density_scale']
    for tcosmo in [flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
                   flrw.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
                   flrw.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
                   flrw.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
                                wp=-1.2, wa=-0.2, zp=0.9),
                   flrw.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
        for method in methods:
            g = getattr(tcosmo, method)
            value_flat = g(z)
            assert value_flat.shape == z.shape
            value_2d = g(z_reshape2d)
            assert value_2d.shape == z_reshape2d.shape
            value_3d = g(z_reshape3d)
            assert value_3d.shape == z_reshape3d.shape
            assert allclose(value_flat, value_2d.flatten())
            assert allclose(value_flat, value_3d.flatten())


def test_equality():
    """Test equality and equivalence."""
    # mismatched signatures, both directions.
    newcosmo = flrw.w0waCDM(**Planck18._init_arguments, Ode0=0.6)
    assert newcosmo != Planck18
    assert Planck18 != newcosmo


def test_xtfuncs():
    """ Test of absorption and lookback integrand"""
    cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
    z = np.array([2.0, 3.2])
    assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
                    rtol=1e-4)
    assert allclose(cosmo.lookback_time_integrand(z),
                    [0.10333179, 0.04644541], rtol=1e-4)
    assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
                    rtol=1e-4)
    assert allclose(cosmo.abs_distance_integrand(z),
                    [2.7899584, 3.44104758], rtol=1e-4)


# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(flrw.FLRW):
    def __init__(self):
        super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name="test_cos")
        self._w0 = -0.9

    def w(self, z):
        return self._w0 * np.ones_like(z)


# Similar, but with neutrinos
class test_cos_subnu(flrw.FLRW):
    def __init__(self):
        super().__init__(70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV,
                         name="test_cos_nu")
        self._w0 = -0.8

    def w(self, z):
        return self._w0 * np.ones_like(z)


@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
    # This is the comparison object
    z = [0.2, 0.4, 0.6, 0.9]
    cosmo = flrw.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
    assert allclose(cosmo.luminosity_distance(z),
                    [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)

    # Now try the subclass that only gives w(z)
    cosmo = test_cos_sub()
    assert allclose(cosmo.luminosity_distance(z),
                    [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
    # Test efunc
    assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
    assert allclose(cosmo.efunc([0.5, 1.0]),
                    [1.31744953, 1.7489240754], rtol=1e-5)
    assert allclose(cosmo.inv_efunc([0.5, 1.0]),
                    [0.75904236, 0.57178011], rtol=1e-5)
    # Test de_density_scale
    assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
    assert allclose(cosmo.de_density_scale([0.5, 1.0]),
                    [1.12934694, 1.23114444], rtol=1e-4)

    # Add neutrinos for efunc, inv_efunc


@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
    # Test non-relativistic matter evolution
    tcos = flrw.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
    assert allclose(tcos.Om0, 0.3)
    assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
    assert allclose(tcos.Om(0), 0.3)
    assert allclose(tcos.Ob(0), 0.045)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
                    rtol=1e-4)
    assert allclose(tcos.Ob(z), [0.045, 0.08868613, 0.11612903, 0.13806818],
                    rtol=1e-4)
    assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
                    rtol=1e-4)
    # Consistency of dark and baryonic matter evolution with all
    # non-relativistic matter
    assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))


@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
    # Test Ok evolution
    # Flat, boring case
    tcos = flrw.FlatLambdaCDM(70.0, 0.3)
    assert allclose(tcos.Ok0, 0.0)
    assert allclose(tcos.Ok(0), 0.0)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0], rtol=1e-6)

    # Not flat
    tcos = flrw.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
    assert allclose(tcos.Ok0, 0.2)
    assert allclose(tcos.Ok(0), 0.2)
    assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
                    rtol=1e-4)

    # Test the sum; note that Ogamma/Onu are 0
    assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
                    [1.0, 1.0, 1.0, 1.0], rtol=1e-5)


@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
    # Test Ode evolution, turn off neutrinos, cmb
    tcos = flrw.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
    assert allclose(tcos.Ode0, 0.7)
    assert allclose(tcos.Ode(0), 0.7)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
                    rtol=1e-5)


@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
    """Tests the effects of changing the temperature of the CMB"""
    # Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparison is limited by
    # how many digits it outputs, which limits our test to about
    # 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
    # Also, inspection of the NWACC code shows it uses inaccurate
    # constants at the 0.2% level (specifically, a_B),
    # so we shouldn't expect to match it that well. The integral is
    # also done rather crudely. Therefore, we should not expect
    # the NWACC to be accurate to better than about 0.5%, which is
    # unfortunate, but reflects a problem with it rather than this code.

    # More accurate tests below using Mathematica
    z = np.array([1.0, 10.0, 500.0, 1000.0])
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)

    # Next compare with doing the integral numerically in Mathematica,
    # which allows more precision in the test. It is at least as
    # good as 0.01%, possibly better
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
    cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)

    # Just to be really sure, we also do a version where the integral
    # is analytic, which is a Ode = 0 flat universe. In this case
    # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
    # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
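    # Editorial note (not from the original source): the constants below are
    # sigma_SB = 5.670373e-8 W m^-2 K^-4, c = 299792458 m/s, and the critical
    # density for H0 = 100 km/s/Mpc, rho_crit = 1.87837e-26 kg m^-3, so
    # Ogamma0 h^2 = (4 sigma_SB T^4 / c^3) / rho_crit, and the neutrino term
    # carries the usual (7/8) (4/11)^(4/3) Neff factor.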
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26 Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04 Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2 Om0 = 1.0 - Or0 hubdis = (299792.458 / 70.0) * u.Mpc cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04) targvals = 2.0 * hubdis * \ (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) # And integers for z assert allclose(cosmo.comoving_distance(z.astype(int)), targvals, rtol=1e-5) # Try Tcmb0 = 4 Or0 *= (4.0 / 2.725) ** 4 Om0 = 1.0 - Or0 cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04) targvals = 2.0 * hubdis * \ (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) @pytest.mark.skipif('not HAS_SCIPY') def test_tcmb(): cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5) assert allclose(cosmo.Tcmb0, 2.5 * u.K) assert allclose(cosmo.Tcmb(2), 7.5 * u.K) z = [0.0, 1.0, 2.0, 3.0, 9.0] assert allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) # Make sure it's the same for integers z = [0, 1, 2, 3, 9] assert allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) @pytest.mark.skipif('not HAS_SCIPY') def test_tnu(): cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6) assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6) z = [0.0, 1.0, 2.0, 3.0] expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) # Test for integers z = [0, 1, 2, 3] assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) @pytest.mark.skipif('not HAS_SCIPY') def test_efunc_vs_invefunc_flrw(): """ Test that efunc and inv_efunc give inverse values""" z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) # FLRW is abstract, so requires test_cos_sub defined earlier # This requires scipy, unlike the built-ins, because it # calls de_density_scale, which has an integral in it cosmo = test_cos_sub() assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) # Add neutrinos cosmo = test_cos_subnu() assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) @pytest.mark.skipif('not HAS_SCIPY') def test_kpc_methods(): cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert allclose(cosmo.arcsec_per_kpc_comoving(3), 0.0317179167 * u.arcsec / u.kpc) assert allclose(cosmo.arcsec_per_kpc_proper(3), 0.1268716668 * u.arcsec / u.kpc) assert allclose(cosmo.kpc_comoving_per_arcmin(3), 1891.6753126 * u.kpc / u.arcmin) assert allclose(cosmo.kpc_proper_per_arcmin(3), 472.918828 * u.kpc / u.arcmin) @pytest.mark.skipif('not HAS_SCIPY') def test_comoving_volume(): c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) # test against ned wright's calculator (cubic Gpc) redshifts = np.array([0.5, 1, 2, 3, 5, 9]) wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3 wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3 wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3 # The wright calculator isn't very accurate, so we use a rather # modest precision assert allclose(c_flat.comoving_volume(redshifts), 
wright_flat, rtol=1e-2) assert allclose(c_open.comoving_volume(redshifts), wright_open, rtol=1e-2) assert allclose(c_closed.comoving_volume(redshifts), wright_closed, rtol=1e-2) @pytest.mark.skipif('not HAS_SCIPY') def test_differential_comoving_volume(): from scipy.integrate import quad c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) # test that integration of differential_comoving_volume() # yields same as comoving_volume() redshifts = np.array([0.5, 1, 2, 3, 5, 9]) wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3 wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3 wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3 # The wright calculator isn't very accurate, so we use a rather # modest precision. ftemp = lambda x: c_flat.differential_comoving_volume(x).value otemp = lambda x: c_open.differential_comoving_volume(x).value ctemp = lambda x: c_closed.differential_comoving_volume(x).value # Multiply by solid_angle (4 * pi) assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_flat, rtol=1e-2) assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_open, rtol=1e-2) assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_closed, rtol=1e-2) @pytest.mark.skipif('not HAS_SCIPY') def test_flat_open_closed_icosmo(): """ Test against the tabulated values generated from icosmo.org with three example cosmologies (flat, open and closed). 
""" cosmo_flat = """\ # from icosmo (icosmo.org) # Om 0.3 w -1 h 0.7 Ol 0.7 # z comoving_transvers_dist angular_diameter_dist luminosity_dist 0.0000000 0.0000000 0.0000000 0.0000000 0.16250000 669.77536 576.15085 778.61386 0.32500000 1285.5964 970.26143 1703.4152 0.50000000 1888.6254 1259.0836 2832.9381 0.66250000 2395.5489 1440.9317 3982.6000 0.82500000 2855.5732 1564.6976 5211.4210 1.0000000 3303.8288 1651.9144 6607.6577 1.1625000 3681.1867 1702.2829 7960.5663 1.3250000 4025.5229 1731.4077 9359.3408 1.5000000 4363.8558 1745.5423 10909.640 1.6625000 4651.4830 1747.0359 12384.573 1.8250000 4916.5970 1740.3883 13889.387 2.0000000 5179.8621 1726.6207 15539.586 2.1625000 5406.0204 1709.4136 17096.540 2.3250000 5616.5075 1689.1752 18674.888 2.5000000 5827.5418 1665.0120 20396.396 2.6625000 6010.4886 1641.0890 22013.414 2.8250000 6182.1688 1616.2533 23646.796 3.0000000 6355.6855 1588.9214 25422.742 3.1625000 6507.2491 1563.3031 27086.425 3.3250000 6650.4520 1537.6768 28763.205 3.5000000 6796.1499 1510.2555 30582.674 3.6625000 6924.2096 1485.0852 32284.127 3.8250000 7045.8876 1460.2876 33996.408 4.0000000 7170.3664 1434.0733 35851.832 4.1625000 7280.3423 1410.2358 37584.767 4.3250000 7385.3277 1386.9160 39326.870 4.5000000 7493.2222 1362.4040 41212.722 4.6625000 7588.9589 1340.2135 42972.480 """ cosmo_open = """\ # from icosmo (icosmo.org) # Om 0.3 w -1 h 0.7 Ol 0.1 # z comoving_transvers_dist angular_diameter_dist luminosity_dist 0.0000000 0.0000000 0.0000000 0.0000000 0.16250000 643.08185 553.18868 747.58265 0.32500000 1200.9858 906.40441 1591.3062 0.50000000 1731.6262 1154.4175 2597.4393 0.66250000 2174.3252 1307.8648 3614.8157 0.82500000 2578.7616 1413.0201 4706.2399 1.0000000 2979.3460 1489.6730 5958.6920 1.1625000 3324.2002 1537.2024 7188.5829 1.3250000 3646.8432 1568.5347 8478.9104 1.5000000 3972.8407 1589.1363 9932.1017 1.6625000 4258.1131 1599.2913 11337.226 1.8250000 4528.5346 1603.0211 12793.110 2.0000000 4804.9314 1601.6438 14414.794 2.1625000 5049.2007 1596.5852 15968.097 2.3250000 5282.6693 1588.7727 17564.875 2.5000000 5523.0914 1578.0261 19330.820 2.6625000 5736.9813 1566.4113 21011.694 2.8250000 5942.5803 1553.6158 22730.370 3.0000000 6155.4289 1538.8572 24621.716 3.1625000 6345.6997 1524.4924 26413.975 3.3250000 6529.3655 1509.6799 28239.506 3.5000000 6720.2676 1493.3928 30241.204 3.6625000 6891.5474 1478.0799 32131.840 3.8250000 7057.4213 1462.6780 34052.058 4.0000000 7230.3723 1446.0745 36151.862 4.1625000 7385.9998 1430.7021 38130.224 4.3250000 7537.1112 1415.4199 40135.117 4.5000000 7695.0718 1399.1040 42322.895 4.6625000 7837.5510 1384.1150 44380.133 """ cosmo_closed = """\ # from icosmo (icosmo.org) # Om 2 w -1 h 0.7 Ol 0.1 # z comoving_transvers_dist angular_diameter_dist luminosity_dist 0.0000000 0.0000000 0.0000000 0.0000000 0.16250000 601.80160 517.67879 699.59436 0.32500000 1057.9502 798.45297 1401.7840 0.50000000 1438.2161 958.81076 2157.3242 0.66250000 1718.6778 1033.7912 2857.3019 0.82500000 1948.2400 1067.5288 3555.5381 1.0000000 2152.7954 1076.3977 4305.5908 1.1625000 2312.3427 1069.2914 5000.4410 1.3250000 2448.9755 1053.3228 5693.8681 1.5000000 2575.6795 1030.2718 6439.1988 1.6625000 2677.9671 1005.8092 7130.0873 1.8250000 2768.1157 979.86398 7819.9270 2.0000000 2853.9222 951.30739 8561.7665 2.1625000 2924.8116 924.84161 9249.7167 2.3250000 2988.5333 898.80701 9936.8732 2.5000000 3050.3065 871.51614 10676.073 2.6625000 3102.1909 847.01459 11361.774 2.8250000 3149.5043 823.39982 12046.854 3.0000000 3195.9966 798.99915 12783.986 3.1625000 3235.5334 777.30533 
13467.908 3.3250000 3271.9832 756.52790 14151.327 3.5000000 3308.1758 735.15017 14886.791 3.6625000 3339.2521 716.19347 15569.263 3.8250000 3368.1489 698.06195 16251.319 4.0000000 3397.0803 679.41605 16985.401 4.1625000 3422.1142 662.87926 17666.664 4.3250000 3445.5542 647.05243 18347.576 4.5000000 3469.1805 630.76008 19080.493 4.6625000 3489.7534 616.29199 19760.729 """ redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1) dm = dm * u.Mpc da = da * u.Mpc dl = dl * u.Mpc cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0) assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) assert allclose(cosmo.angular_diameter_distance(redshifts), da) assert allclose(cosmo.luminosity_distance(redshifts), dl) redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1) dm = dm * u.Mpc da = da * u.Mpc dl = dl * u.Mpc cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0) assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) assert allclose(cosmo.angular_diameter_distance(redshifts), da) assert allclose(cosmo.luminosity_distance(redshifts), dl) redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1) dm = dm * u.Mpc da = da * u.Mpc dl = dl * u.Mpc cosmo = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0) assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) assert allclose(cosmo.angular_diameter_distance(redshifts), da) assert allclose(cosmo.luminosity_distance(redshifts), dl) @pytest.mark.skipif('not HAS_SCIPY') def test_integral(): # Test integer vs. floating point inputs cosmo = flrw.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50) assert allclose(cosmo.comoving_distance(3), cosmo.comoving_distance(3.0), rtol=1e-7) assert allclose(cosmo.comoving_distance([1, 2, 3, 5]), cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]), rtol=1e-7) assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7) assert allclose(cosmo.efunc([1, 2, 6]), cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7) assert allclose(cosmo.inv_efunc([1, 2, 6]), cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7) @pytest.mark.skipif('not HAS_SCIPY') def test_de_densityscale(): cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert allclose(cosmo.de_density_scale(z), [1.0, 1.0, 1.0, 1.0, 1.0]) # Integer check assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert allclose(cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5) assert allclose(cosmo.de_density_scale(z), [1.15369, 1.31453, 1.83712, 3.95285, 6.5479], rtol=1e-4) assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert allclose(cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5) assert allclose(cosmo.de_density_scale(z), [0.746048, 0.5635595, 0.25712378, 0.026664129, 0.0035916468], rtol=1e-4) assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert allclose(cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5) assert allclose(cosmo.de_density_scale(z), [0.9934201, 0.9767912, 0.897450, 0.622236, 0.4458753], rtol=1e-4) assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert allclose(cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) cosmo = flrw.wpwaCDM(H0=70, 
Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)
    assert allclose(cosmo.de_density_scale(z),
                    [1.012246048, 1.0280102, 1.087439, 1.324988, 1.565746],
                    rtol=1e-4)
    assert allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0),
                    rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)


@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
    assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
    assert allclose(tcos.age([1., 5.]), [5.97113193, 1.20553129] * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)

    # Add relativistic species
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
    assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)

    # And massive neutrinos
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0, m_nu=0.1 * u.eV)
    assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)


@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
    assert allclose(tcos.distmod([1, 5]), [44.124857, 48.40167258] * u.mag)
    assert allclose(tcos.distmod([1., 5.]), [44.124857, 48.40167258] * u.mag)


@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
    # Cosmology with negative luminosity distances (perfectly okay,
    # if obscure)
    tcos = flrw.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
    assert allclose(tcos.luminosity_distance([50, 100]),
                    [16612.44047622, -46890.79092244] * u.Mpc)
    assert allclose(tcos.distmod([50, 100]),
                    [46.102167189, 48.355437790944] * u.mag)


@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
    from astropy.constants import codata2014

    # WMAP7 but with Omega_relativistic = 0
    # These tests will fail if astropy.const starts returning non-mks
    # units by default; see the comment at the top of core.py.
    # critical_density0 is inversely proportional to G.
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value
    assert allclose(tcos.critical_density0 * fac,
                    9.309668456020899e-30 * (u.g / u.cm**3))
    assert allclose(tcos.critical_density0, tcos.critical_density(0))
    assert allclose(
        tcos.critical_density([1, 5]) * fac,
        [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))
    assert allclose(
        tcos.critical_density([1., 5.]) * fac,
        [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))


@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
    tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
    with pytest.raises(ValueError):  # test diff size z1, z2 fail
        tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))

    # Comoving distances are invertible
    assert allclose(tcos._comoving_distance_z1z2(1, 2),
                    -tcos._comoving_distance_z1z2(2, 1))

    z1 = 0, 0, 2, 0.5, 1
    z2 = 2, 1, 1, 2.5, 1.1
    results = (3767.90579253, 2386.25591391, -1381.64987862,
               2893.11776663, 174.1524683) * u.Mpc
    assert allclose(tcos._comoving_distance_z1z2(z1, z2), results)


@pytest.mark.skipif('not HAS_SCIPY')
def test_age_in_special_cosmologies():
    """Check that age in de Sitter and Einstein-de Sitter Universes work.

    Some analytic solutions fail at these critical points.
""" c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0) assert allclose(c_dS.age(z=0), np.inf * u.Gyr) assert allclose(c_dS.age(z=1), np.inf * u.Gyr) assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr) assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr) c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0) assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr) assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr) assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr) assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr) @pytest.mark.skipif('not HAS_SCIPY') def test_distance_in_special_cosmologies(): """Check that de Sitter and Einstein-de Sitter Universes both work. Some analytic solutions fail at these critical points. """ c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0) assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc) assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc) c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0) assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc) assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc) c_dS = flrw.LambdaCDM(100, 0, 1, Tcmb0=0) assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc) assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc) c_EdS = flrw.LambdaCDM(100, 1, 0, Tcmb0=0) assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc) assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc) @pytest.mark.skipif('not HAS_SCIPY') def test_comoving_transverse_distance_z1z2(): tcos = flrw.FlatLambdaCDM(100, 0.3, Tcmb0=0.0) with pytest.raises(ValueError): # test diff size z1, z2 fail tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5)) # Tests that should actually work, target values computed with # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686) assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2), 1313.2232194828466 * u.Mpc) # In a flat universe comoving distance and comoving transverse # distance are identical z1 = 0, 0, 2, 0.5, 1 z2 = 2, 1, 1, 2.5, 1.1 assert allclose(tcos._comoving_distance_z1z2(z1, z2), tcos._comoving_transverse_distance_z1z2(z1, z2)) # Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid. tcos = flrw.FlatLambdaCDM(100, 1.5, Tcmb0=0.0) results = (2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258) * u.Mpc assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) # In a flat universe comoving distance and comoving transverse # distance are identical z1 = 0, 0, 2, 0.5, 1 z2 = 2, 1, 1, 2.5, 1.1 assert allclose(tcos._comoving_distance_z1z2(z1, z2), tcos._comoving_transverse_distance_z1z2(z1, z2)) # Test non-flat cases to avoid simply testing # comoving_distance_z1z2. Test array, array case. tcos = flrw.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0) results = (3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884) * u.Mpc assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) # Test positive curvature with scalar, array combination. 
tcos = flrw.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0) z1 = 0.1 z2 = 0, 0.1, 0.2, 0.5, 1.1, 2 results = (-281.31602666724865, 0., 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927) * u.Mpc assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) @pytest.mark.skipif('not HAS_SCIPY') def test_angular_diameter_distance_z1z2(): tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) with pytest.raises(ValueError): # test diff size z1, z2 fail tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5]) # Tests that should actually work assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), 646.22968662822018 * u.Mpc) z1 = 2 # Separate test for z2<z1, returns negative value with warning z2 = 1 results = -969.34452994 * u.Mpc with pytest.warns(AstropyUserWarning, match='less than first redshift'): assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results) z1 = 0, 0, 0.5, 1 z2 = 2, 1, 2.5, 1.1 results = (1760.0628637762106, 1670.7497657219858, 1159.0970895962193, 115.72768186186921) * u.Mpc assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results) z1 = 0.1 z2 = 0.1, 0.2, 0.5, 1.1, 2 results = (0., 332.09893173, 986.35635069, 1508.37010062, 1621.07937976) * u.Mpc assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2), results) # Non-flat (positive Ok0) test tcos = flrw.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0) assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), 620.1175337852428 * u.Mpc) # Non-flat (negative Ok0) test tcos = flrw.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0) assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), 228.42914659246014 * u.Mpc) @pytest.mark.skipif('not HAS_SCIPY') def test_absorption_distance(): tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert allclose(tcos.absorption_distance([1, 3]), [1.72576635, 7.98685853]) assert allclose(tcos.absorption_distance([1., 3.]), [1.72576635, 7.98685853]) assert allclose(tcos.absorption_distance(3), 7.98685853) assert allclose(tcos.absorption_distance(3.), 7.98685853) @pytest.mark.skipif('not HAS_SCIPY') def test_distances(): # Test distance calculations for various special case # scenarios (no relativistic species, normal, massive neutrinos) # These do not come from external codes -- they are just internal # checks to make sure nothing changes if we muck with the distance # calculators z = np.array([1.0, 2.0, 3.0, 4.0]) # The pattern here is: no relativistic species, the relativistic # species with massless neutrinos, then massive neutrinos cos = flrw.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [2953.93001902, 4616.7134253, 5685.07765971, 6440.80611897] * u.Mpc, rtol=1e-4) cos = flrw.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [3037.12620424, 4776.86236327, 5889.55164479, 6671.85418235] * u.Mpc, rtol=1e-4) cos = flrw.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2471.80626824, 3567.1902565, 4207.15995626, 4638.20476018] * u.Mpc, rtol=1e-4) # Flat cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [3180.83488552, 5060.82054204, 6253.6721173, 7083.5374303] * u.Mpc, rtol=1e-4) cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [3180.42662867, 5059.60529655, 6251.62766102, 7080.71698117] * u.Mpc, rtol=1e-4) cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, 
m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2337.54183142, 3371.91131264, 3988.40711188, 4409.09346922] * u.Mpc, rtol=1e-4) # Add w cos = flrw.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [3216.8296894, 5117.2097601, 6317.05995437, 7149.68648536] * u.Mpc, rtol=1e-4) cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [3143.56537758, 5000.32196494, 6184.11444601, 7009.80166062] * u.Mpc, rtol=1e-4) cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2337.76035371, 3372.1971387, 3988.71362289, 4409.40817174] * u.Mpc, rtol=1e-4) # Non-flat w cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [2849.6163356, 4428.71661565, 5450.97862778, 6179.37072324] * u.Mpc, rtol=1e-4) cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [2904.35580229, 4511.11471267, 5543.43643353, 6275.9206788] * u.Mpc, rtol=1e-4) cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2473.32522734, 3581.54519631, 4232.41674426, 4671.83818117] * u.Mpc, rtol=1e-4) # w0wa cos = flrw.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [2937.7807638, 4572.59950903, 5611.52821924, 6339.8549956] * u.Mpc, rtol=1e-4) cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [2907.34722624, 4539.01723198, 5593.51611281, 6342.3228444] * u.Mpc, rtol=1e-4) cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2507.18336722, 3633.33231695, 4292.44746919, 4736.35404638] * u.Mpc, rtol=1e-4) # Flatw0wa cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [3123.29892781, 4956.15204302, 6128.15563818, 6948.26480378] * u.Mpc, rtol=1e-4) cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [3122.92671907, 4955.03768936, 6126.25719576, 6945.61856513] * u.Mpc, rtol=1e-4) cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(10.0, u.eV)) assert allclose(cos.comoving_distance(z), [2337.70072701, 3372.13719963, 3988.6571093, 4409.35399673] * u.Mpc, rtol=1e-4) # wpwa cos = flrw.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [2954.68975298, 4599.83254834, 5643.04013201, 6373.36147627] * u.Mpc, rtol=1e-4) cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) assert allclose(cos.comoving_distance(z), [2919.00656215, 4558.0218123, 5615.73412391, 6366.10224229] * u.Mpc, rtol=1e-4) cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0, Neff=4, m_nu=u.Quantity(5.0, u.eV)) assert allclose(cos.comoving_distance(z), [2629.48489827, 3874.13392319, 4614.31562397, 5116.51184842] * u.Mpc, rtol=1e-4) # w0wz cos = flrw.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0) assert allclose(cos.comoving_distance(z), [3051.68786716, 4756.17714818, 5822.38084257, 6562.70873734] * u.Mpc, rtol=1e-4) cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, 
wz=0.1, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2997.8115653, 4686.45599916, 5764.54388557,
                     6524.17408738] * u.Mpc, rtol=1e-4)
    cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0, Neff=4,
                       m_nu=u.Quantity(5.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2676.73467639, 3940.57967585, 4686.90810278,
                     5191.54178243] * u.Mpc, rtol=1e-4)

    # Also test different numbers of massive neutrinos
    # for FlatLambdaCDM to give the scalar nu density functions a
    # workout
    cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([10.0, 0, 0], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2777.71589173, 4186.91111666, 5046.0300719,
                     5636.10397302] * u.Mpc, rtol=1e-4)
    cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([10.0, 5, 0], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2636.48149391, 3913.14102091, 4684.59108974,
                     5213.07557084] * u.Mpc, rtol=1e-4)
    cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([4.0, 5, 9], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2563.5093049, 3776.63362071, 4506.83448243,
                     5006.50158829] * u.Mpc, rtol=1e-4)
    cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
                             m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2525.58017482, 3706.87633298, 4416.58398847,
                     4901.96669755] * u.Mpc, rtol=1e-4)


@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
    # Testing neutrino density calculation

    # Simple test cosmology, where we compare rho_nu and rho_gamma
    # against the exact formula (eq 24/25 of Komatsu et al. 2011)
    # computed using Mathematica. The approximation we use for f(y)
    # is only good to ~ 0.5% (with some redshift dependence), so that's
    # what we test to.
    ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
    nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)

    # First try 3 massive neutrinos, all 100 eV -- note this is a universe
    # seriously dominated by neutrinos!
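    # Editorial sketch (assumption: the implementation follows the Komatsu
    # et al. 2011 fitting form): nu_relative_density(z) returns
    #   rho_nu / rho_gamma = (7/8) (4/11)^(4/3) * Neff * f(y),
    # with y proportional to m_nu / T_nu(z), f(y) -> 1 in the relativistic
    # limit and f(y) ~ 0.3173 y for y >> 1, which is why nurel_exp below is
    # nuprefac * Neff times the tabulated f values.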
tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(100.0, u.eV)) assert tcos.has_massive_nu assert tcos.Neff == 3 nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, 15633.5, 171.801]) assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3) # Next, slightly less massive tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.25, u.eV)) nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, 39.1005, 1.11086]) assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) # For this one also test Onu directly onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, 0.06999286, 0.1344951]) assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) # And fairly light tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.01, u.eV)) nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, 1.90671, 1.00021]) assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, 0.00268404, 0.0978313]) assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], rtol=1e-4) assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], rtol=1e-4) # Now a mixture of neutrino masses, with non-integer Neff tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04, m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV)) nurel_exp = nuprefac * tcos.Neff * \ np.array([149.386233, 74.87915, 50.0518, 14.002403, 1.03702333]) assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, 0.01963451, 0.10227728]) assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) # Integer redshifts ztest = ztest.astype(int) assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) @pytest.mark.skipif('not HAS_SCIPY') def test_elliptic_comoving_distance_z1z2(): """Regression test for #8388.""" cosmo = flrw.LambdaCDM(70., 2.3, 0.05, Tcmb0=0) z = 0.2 assert allclose(cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0., z)) assert allclose(cosmo._elliptic_comoving_distance_z1z2(0., z), cosmo._integral_comoving_distance_z1z2(0., z)) SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [ flrw.FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter flrw.FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic ] ITERABLE_REDSHIFTS = [ (0, 1, 2, 3, 4), # tuple [0, 1, 2, 3, 4], # list np.array([0, 1, 2, 3, 4]), # array ] @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES) @pytest.mark.parametrize('z', ITERABLE_REDSHIFTS) def test_comoving_distance_iterable_argument(cosmo, z): """ Regression test for #10980 Test that specialized comoving distance methods handle iterable arguments. """ assert allclose(cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0., z)) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES) def test_comoving_distance_broadcast(cosmo): """ Regression test for #10980 Test that specialized comoving distance methods broadcast array arguments. 
""" z1 = np.zeros((2, 5)) z2 = np.ones((3, 1, 5)) z3 = np.ones((7, 5)) output_shape = np.broadcast(z1, z2).shape # Check compatible array arguments return an array with the correct shape assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape # Check incompatible array arguments raise an error with pytest.raises(ValueError, match='z1 and z2 have different shapes'): cosmo._comoving_distance_z1z2(z1, z3)
a3fe34355508974352a910919ecfd0b464fcb10d0f46c70d829727f529e64fd0
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import pickle # THIRD PARTY import pytest # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy import cosmology from astropy.cosmology import parameters, realizations from astropy.cosmology.realizations import Planck13, default_cosmology from astropy.tests.helper import pickle_protocol def test_realizations_in_toplevel_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology`.""" d = dir(cosmology) assert set(d) == set(cosmology.__all__) for n in parameters.available: assert n in d def test_realizations_in_realizations_dir(): """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`.""" d = dir(realizations) assert set(d) == set(realizations.__all__) for n in parameters.available: assert n in d class Test_default_cosmology(object): """Tests for :class:`~astropy.cosmology.realizations.default_cosmology`.""" # ----------------------------------------------------- # Get def test_get_fail(self): """Test bad inputs to :meth:`astropy.cosmology.default_cosmology.get`.""" # a not-valid option, but still a str with pytest.raises(ValueError, match="Unknown cosmology"): cosmo = default_cosmology.get("fail!") # a not-valid type with pytest.raises(TypeError, match="'key' must be must be"): cosmo = default_cosmology.get(object()) def test_get_current(self): """Test :meth:`astropy.cosmology.default_cosmology.get` current value.""" cosmo = default_cosmology.get(None) assert cosmo is default_cosmology.get(default_cosmology._value) def test_get_none(self): """Test :meth:`astropy.cosmology.default_cosmology.get` to `None`.""" cosmo = default_cosmology.get("no_default") assert cosmo is None @pytest.mark.parametrize("name", parameters.available) def test_get_valid(self, name): """Test :meth:`astropy.cosmology.default_cosmology.get` from str.""" cosmo = default_cosmology.get(name) assert cosmo is getattr(realizations, name) def test_get_cosmology_from_string(self, recwarn): """Test method ``get_cosmology_from_string``.""" cosmo = default_cosmology.get_cosmology_from_string("no_default") assert cosmo is None cosmo = default_cosmology.get_cosmology_from_string("Planck13") assert cosmo is Planck13 with pytest.raises(ValueError): cosmo = default_cosmology.get_cosmology_from_string("fail!") # ----------------------------------------------------- # Validate def test_validate_fail(self): """Test :meth:`astropy.cosmology.default_cosmology.validate`.""" # bad input type with pytest.raises(TypeError, match="must be a string or Cosmology"): default_cosmology.validate(TypeError) def test_validate_default(self): """Test method ``validate`` for specific values.""" value = default_cosmology.validate(None) assert value is realizations.Planck18 @pytest.mark.parametrize("name", parameters.available) def test_validate_str(self, name): """Test method ``validate`` for string input.""" value = default_cosmology.validate(name) assert value is getattr(realizations, name) @pytest.mark.parametrize("name", parameters.available) def test_validate_cosmo(self, name): """Test method ``validate`` for cosmology instance input.""" cosmo = getattr(realizations, name) value = default_cosmology.validate(cosmo) assert value is cosmo @pytest.mark.parametrize("name", parameters.available) def test_pickle_builtin_realizations(name, pickle_protocol): """ Test in-built realizations can pickle and unpickle. Also a regression test for #12008. 
""" # get class instance original = getattr(cosmology, name) # pickle and unpickle f = pickle.dumps(original, protocol=pickle_protocol) with u.add_enabled_units(cu): unpickled = pickle.loads(f) assert unpickled == original assert unpickled.meta == original.meta # if the units are not enabled, it isn't equal because redshift units # are not equal. This is a weird, known issue. unpickled = pickle.loads(f) assert unpickled == original assert unpickled.meta != original.meta
aae6169062dec76847ab293b42670c4faef6f34ff28772478b344b1438702f0f
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import inspect
import sys
from io import StringIO

import pytest
import numpy as np

from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (WMAP1, WMAP3, WMAP5, WMAP7, WMAP9,
                                            Planck13, Planck15, Planck18)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY  # noqa
from astropy.utils.exceptions import AstropyUserWarning


@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_scalar():
    # These are tests of expected values, and hence have less precision
    # than the roundtrip tests below (test_z_at_value_roundtrip);
    # here we have to worry about the cosmological calculations
    # giving slightly different values on different architectures,
    # there we are checking internal consistency on the same architecture
    # and so can be more demanding
    cosmo = Planck13
    assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
    assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
    assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
    assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6)
    assert allclose(z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc,
                               ztol=1e-10), 3, rtol=1e-9)
    assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=2),
                    0.681277696, rtol=1e-6)
    assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=2.5),
                    3.7914908, rtol=1e-6)

    # test behavior when the solution is outside z limits (should
    # raise a CosmologyError)
    with pytest.raises(core.CosmologyError):
        with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
            z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)

    with pytest.raises(core.CosmologyError):
        with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
            z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
class Test_ZatValue:

    def setup_class(self):
        self.cosmo = Planck13

    def test_broadcast_arguments(self):
        """Test broadcast of arguments."""
        # broadcasting main argument
        assert allclose(
            z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
            [3.1981206134773115, 0.7562044333305182], rtol=1e-6)

        # basic broadcast of secondary arguments
        assert allclose(
            z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
                       zmin=[0, 2.5], zmax=[2, 4]),
            [0.681277696, 3.7914908], rtol=1e-6)

        # more interesting broadcast
        assert allclose(
            z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
                       zmin=[[0, 2.5]], zmax=[2, 4]),
            [[0.681277696, 3.7914908]], rtol=1e-6)

    def test_broadcast_bracket(self):
        """`bracket` has special requirements."""
        # start with an easy one
        assert allclose(
            z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
            3.1981206134773115, rtol=1e-6)

        # now actually have a bracket
        assert allclose(
            z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
            3.1981206134773115, rtol=1e-6)

        # now a bad length
        with pytest.raises(ValueError, match="sequence"):
            z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])

        # now the wrong dtype : an ndarray, but not an object array
        with pytest.raises(TypeError, match="dtype"):
            z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))

        # now an object array of brackets
        bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
        assert allclose(
            z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
            [3.1981206134773115, 3.1981206134773115], rtol=1e-6)

    def test_bad_broadcast(self):
        """Shapes mismatch as expected"""
        with pytest.raises(ValueError, match="broadcast"):
            z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
                       zmin=[0, 2.5, 0.1], zmax=[2, 4])

    def test_scalar_input_to_output(self):
        """Test scalar input returns a scalar."""
        z = z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
                       zmin=0, zmax=2)
        assert isinstance(z, u.Quantity)
        assert z.dtype == np.float64
        assert z.shape == ()


@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_numpyvectorize():
    """Test that numpy vectorize fails on Quantities.

    If this test starts failing then numpy vectorize can be used instead of
    the home-brewed vectorization. Please submit a PR making the change.
    """
    z_at_value = np.vectorize(_z_at_scalar_value,
                              excluded=["func", "method", "verbose"])
    with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
        z_at_value(Planck15.age, 10*u.Gyr)


@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_verbose(monkeypatch):
    cosmo = Planck13

    # Test the "verbose" flag. Since this uses "print", need to mod stdout
    mock_stdout = StringIO()
    monkeypatch.setattr(sys, 'stdout', mock_stdout)

    resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
    assert str(resx.value) in mock_stdout.getvalue()  # test "verbose" prints res


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_bracketed(method):
    """
    Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
    but setting `bracket` on the appropriate side of the turning point z.
    Setting zmin / zmax should override `bracket`.
""" cosmo = Planck13 if method == 'Bounded': with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'): z = z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method) if z > 1.6: z = 3.7914908 bracket = (0.9, 1.5) else: z = 0.6812777 bracket = (1.6, 2.0) with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"): assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=bracket), z, rtol=1e-6) else: assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(0.3, 1.0)), 0.6812777, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(2.0, 4.0)), 3.7914908, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(0.1, 1.5)), 0.6812777, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(0.1, 1.0, 2.0)), 0.6812777, rtol=1e-6) with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'): assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(0.9, 1.5)), 0.6812777, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(1.6, 2.0)), 3.7914908, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(1.6, 2.0), zmax=1.6), 0.6812777, rtol=1e-6) assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(0.9, 1.5), zmin=1.5), 3.7914908, rtol=1e-6) with pytest.raises(core.CosmologyError): with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'): z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method, bracket=(3.9, 5.0), zmin=4.) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded']) def test_z_at_value_unconverged(method): """ Test warnings on non-converged solution when setting `maxfun` to too small iteration number - only 'Bounded' returns status value and specific message. """ cosmo = Planck18 ztol = {'Brent': [1e-4, 1e-4], 'Golden': [1e-3, 1e-2], 'Bounded': [1e-3, 1e-1]} if method == 'Bounded': ctx = pytest.warns(AstropyUserWarning, match='Solver returned 1: Maximum number of ' 'function calls reached') else: ctx = pytest.warns(AstropyUserWarning, match='Solver returned None') with ctx: z0 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmax=2, maxfun=13, method=method) with ctx: z1 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmin=2, maxfun=13, method=method) assert allclose(z0, 0.32442, rtol=ztol[method][0]) assert allclose(z1, 8.18551, rtol=ztol[method][1]) @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('cosmo', [Planck13, Planck15, Planck18, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, flrw.LambdaCDM, flrw.FlatLambdaCDM, flrw.wpwaCDM, flrw.w0wzCDM, flrw.wCDM, flrw.FlatwCDM, flrw.w0waCDM, flrw.Flatw0waCDM]) def test_z_at_value_roundtrip(cosmo): """ Calculate values from a known redshift, and then check that z_at_value returns the right answer. 
""" z = 0.5 # Skip Ok, w, de_density_scale because in the Planck cosmologies # they are redshift independent and hence uninvertable, # *_distance_z1z2 methods take multiple arguments, so require # special handling # clone is not a redshift-dependent method # nu_relative_density is not redshift-dependent in the WMAP cosmologies skip = ('Ok', 'Otot', 'angular_diameter_distance_z1z2', 'clone', 'is_equivalent', 'de_density_scale', 'w') if str(cosmo.name).startswith('WMAP'): skip += ('nu_relative_density', ) methods = inspect.getmembers(cosmo, predicate=inspect.ismethod) for name, func in methods: if name.startswith('_') or name in skip: continue fval = func(z) # we need zmax here to pick the right solution for # angular_diameter_distance and related methods. # Be slightly more generous with rtol than the default 1e-8 # used in z_at_value got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12) assert allclose(got, z, rtol=2e-11), f'Round-trip testing {name} failed' # Test distance functions between two redshifts; only for realizations if isinstance(cosmo.name, str): z2 = 2.0 func_z1z2 = [ lambda z1: cosmo._comoving_distance_z1z2(z1, z2), lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2), lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2) ] for func in func_z1z2: fval = func(z) assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
874a86a65ac6bc6b360e99c3f21cd5efff76ced070e74f4f247f5fc4a31e355c
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# STDLIB
import inspect
import random

# THIRD PARTY
import pytest
import numpy as np

# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.cosmology.io.model import _CosmologyModel, from_model, to_model
from astropy.cosmology.tests.conftest import get_redshift_methods
from astropy.modeling import FittableModel
from astropy.modeling.models import Gaussian1D
from astropy.utils.compat.optional_deps import HAS_SCIPY

from .base import ToFromDirectTestBase, ToFromTestMixinBase

###############################################################################


class ToFromModelTestMixin(ToFromTestMixinBase):
    """Tests for a Cosmology[To/From]Format with ``format="astropy.model"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
    """

    @pytest.fixture(scope="class")
    def method_name(self, cosmo):
        # get methods, ignoring private and dunder
        methods = get_redshift_methods(cosmo, allow_private=False, allow_z2=True)

        # dynamically detect ABC and optional dependencies
        for n in tuple(methods):
            params = inspect.signature(getattr(cosmo, n)).parameters.keys()

            # ABC can't introspect for good input
            ERROR_SIEVE = (NotImplementedError, ValueError)
            if not HAS_SCIPY:
                ERROR_SIEVE = ERROR_SIEVE + (ModuleNotFoundError, )

            args = np.arange(len(params)) + 1
            try:
                getattr(cosmo, n)(*args)
            except ERROR_SIEVE:
                methods.discard(n)

        # TODO! pytest doesn't currently allow multiple yields (`cosmo`) so
        # testing with 1 random method
        # yield from methods
        return random.choice(tuple(methods)) if methods else None

    # ===============================================================

    def test_fromformat_model_wrong_cls(self, from_format):
        """Test when Model is not the correct class."""
        model = Gaussian1D(amplitude=10, mean=14)

        with pytest.raises(AttributeError):
            from_format(model)

    def test_toformat_model_not_method(self, to_format):
        """Test when method is not a method."""
        with pytest.raises(AttributeError):
            to_format("astropy.model", method="this is definitely not a method.")

    def test_toformat_model_not_callable(self, to_format):
        """Test when method is actually an attribute."""
        with pytest.raises(ValueError):
            to_format("astropy.model", method="name")

    def test_toformat_model(self, cosmo, to_format, method_name):
        """Test cosmology -> astropy.model."""
        if method_name is None:  # no test if no method
            return

        model = to_format("astropy.model", method=method_name)
        assert isinstance(model, _CosmologyModel)

        # Parameters
        expect = tuple([n for n in cosmo.__parameters__
                        if getattr(cosmo, n) is not None])
        assert model.param_names == expect

        # scalar result
        args = np.arange(model.n_inputs) + 1

        got = model.evaluate(*args)
        expected = getattr(cosmo, method_name)(*args)
        assert np.all(got == expected)

        got = model(*args)
        expected = getattr(cosmo, method_name)(*args)
        assert np.all(got == expected)

        # vector result
        if "scalar" not in method_name:
            args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T

            got = model.evaluate(*args)
            expected = getattr(cosmo, method_name)(*args)
            assert np.all(got == expected)

            got = model(*args)
            expected = getattr(cosmo, method_name)(*args)
            assert np.all(got == expected)

    def test_tofromformat_model_instance(self, cosmo_cls, cosmo,
                                         method_name, to_format, from_format):
        """Test cosmology -> astropy.model -> cosmology."""
        if method_name is None:  # no test if no method
            return

        # ------------
        # To Model
        # this also serves as a test of all added methods / attributes
        # in _CosmologyModel.

        model = to_format("astropy.model", method=method_name)

        assert isinstance(model, _CosmologyModel)
        assert model.cosmology_class is cosmo_cls
        assert model.cosmology == cosmo
        assert model.method_name == method_name

        # ------------
        # From Model

        # it won't error if everything matches up
        got = from_format(model, format="astropy.model")
        assert got == cosmo
        assert set(cosmo.meta.keys()).issubset(got.meta.keys())
        # Note: model adds parameter attributes to the metadata

        # also it auto-identifies 'format'
        got = from_format(model)
        assert got == cosmo
        assert set(cosmo.meta.keys()).issubset(got.meta.keys())

    def test_fromformat_model_subclass_partial_info(self):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        """
        pass  # there's no partial information with a Model

    @pytest.mark.parametrize("format", [True, False, None, "astropy.model"])
    def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.

        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a model.
        """
        if method_name is None:  # no test if no method
            return

        obj = to_format("astropy.model", method=method_name)
        assert not isinstance(obj, Cosmology)

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (True if format is not False else False)


class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin):
    """Directly test ``to/from_model``."""

    def setup_class(self):
        self.functions = {"to": to_model, "from": from_model}
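
# --- Editor's hedged sketch (addition, not part of the test file above) ---
# Converting a cosmology method into a fittable model and back, assuming
# astropy >= 5.0 (`format="astropy.model"` with a ``method`` argument).
from astropy.cosmology import Cosmology, Planck18

model = Planck18.to_format("astropy.model", method="lookback_time")
assert model(1.0) == Planck18.lookback_time(1.0)  # evaluates the wrapped method
cosmo = Cosmology.from_format(model)              # round-trips the cosmology
assert cosmo == Planck18
# --- end sketch ---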
289388e0762e40ed21521154d68fcf44ccc527939c7a5ce616f7f2dfd01f1173
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# STDLIB
import copy
import inspect
from collections import OrderedDict

# THIRD PARTY
import numpy as np
import pytest

# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.mapping import from_mapping, to_mapping
from astropy.table import QTable, vstack

from .base import ToFromDirectTestBase, ToFromTestMixinBase

###############################################################################


class ToFromMappingTestMixin(ToFromTestMixinBase):
    """Tests for a Cosmology[To/From]Format with ``format="mapping"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_mapping_default(self, cosmo, to_format):
        """Test default usage of Cosmology -> mapping."""
        m = to_format('mapping')
        keys = tuple(m.keys())

        assert isinstance(m, dict)

        # Check equality of all expected items
        assert keys[0] == "cosmology"
        assert m.pop("cosmology") is cosmo.__class__

        assert keys[1] == "name"
        assert m.pop("name") == cosmo.name

        for i, k in enumerate(cosmo.__parameters__, start=2):
            assert keys[i] == k
            assert np.array_equal(m.pop(k), getattr(cosmo, k))

        assert keys[-1] == "meta"
        assert m.pop("meta") == cosmo.meta

        # No unexpected items
        assert not m

    def test_to_mapping_wrong_cls(self, to_format):
        """Test incorrect argument ``cls`` in ``to_mapping()``."""
        with pytest.raises(TypeError, match="'cls' must be"):
            to_format('mapping', cls=list)

    @pytest.mark.parametrize("map_cls", [dict, OrderedDict])
    def test_to_mapping_cls(self, to_format, map_cls):
        """Test argument ``cls`` in ``to_mapping()``."""
        m = to_format('mapping', cls=map_cls)
        assert isinstance(m, map_cls)  # test type

    def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format):
        """Test argument ``cosmology_as_str`` in ``to_mapping()``."""
        default = to_format('mapping')

        # Cosmology is the class
        m = to_format('mapping', cosmology_as_str=False)
        assert inspect.isclass(m["cosmology"])
        assert cosmo_cls is m["cosmology"]
        assert m == default  # False is the default option

        # Cosmology is a string
        m = to_format('mapping', cosmology_as_str=True)
        assert isinstance(m["cosmology"], str)
        assert m["cosmology"] == cosmo_cls.__qualname__  # Correct class
        assert tuple(m.keys())[0] == "cosmology"  # Stayed at same index

    def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format):
        """Test roundtrip with ``cosmology_as_str=True``.

        The test for the default option (`False`) is in
        ``test_tofrom_mapping_instance``.
        """
        m = to_format('mapping', cosmology_as_str=True)

        got = from_format(m, format="mapping")
        assert got == cosmo
        assert got.meta == cosmo.meta

    def test_to_mapping_move_from_meta(self, to_format):
        """Test argument ``move_from_meta`` in ``to_mapping()``."""
        default = to_format('mapping')

        # Metadata is 'separate' from main mapping
        m = to_format('mapping', move_from_meta=False)
        assert "meta" in m.keys()
        assert not any([k in m for k in m["meta"]])  # Not added to main
        assert m == default  # False is the default option

        # Metadata is mixed into main mapping.
        m = to_format('mapping', move_from_meta=True)
        assert "meta" not in m.keys()
        assert all([k in m for k in default["meta"]])  # All added to main
        # The parameters take precedence over the metadata
        assert all([np.array_equal(v, m[k])
                    for k, v in default.items() if k != "meta"])

    def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format):
        """Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``."""
        # Metadata is mixed into main mapping.
        m = to_format('mapping', move_from_meta=True)
        # (Just adding something to ensure there's 'metadata')
        m["mismatching"] = "will error"

        # (Tests are different if the last argument is a **kwarg)
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = from_format(m, format="mapping")

            assert got.name == cosmo.name
            assert "mismatching" not in got.meta

            return  # don't continue testing

        # Reading with mismatching parameters errors...
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(m, format="mapping")

        # unless mismatched are moved to meta.
        got = from_format(m, format="mapping", move_to_meta=True)
        assert got == cosmo  # (Doesn't check metadata)
        assert got.meta["mismatching"] == "will error"

    # -----------------------------------------------------

    def test_from_not_mapping(self, cosmo, from_format):
        """Test incorrect map type in ``from_mapping()``."""
        with pytest.raises((TypeError, ValueError)):
            from_format("NOT A MAP", format="mapping")

    def test_from_mapping_default(self, cosmo, to_format, from_format):
        """Test (cosmology -> Mapping) -> cosmology."""
        m = to_format('mapping')

        # Read from exactly as given.
        got = from_format(m, format="mapping")
        assert got == cosmo
        assert got.meta == cosmo.meta

        # Reading auto-identifies 'format'
        got = from_format(m)
        assert got == cosmo
        assert got.meta == cosmo.meta

    def test_fromformat_subclass_partial_info_mapping(self, cosmo):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        """
        m = cosmo.to_format("mapping")

        # partial information
        m.pop("cosmology", None)
        m.pop("Tcmb0", None)

        # read with the same class that wrote fills in the missing info with
        # the default value
        got = cosmo.__class__.from_format(m, format="mapping")
        got2 = Cosmology.from_format(m, format="mapping",
                                     cosmology=cosmo.__class__)
        got3 = Cosmology.from_format(m, format="mapping",
                                     cosmology=cosmo.__class__.__qualname__)

        assert (got == got2) and (got2 == got3)  # internal consistency

        # not equal, because Tcmb0 is changed, which also changes m_nu
        assert got != cosmo
        assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default
        assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0,
                         m_nu=cosmo.m_nu) == cosmo
        # but the metadata is the same
        assert got.meta == cosmo.meta

    @pytest.mark.parametrize("format", [True, False, None, "mapping"])
    def test_is_equivalent_to_mapping(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.

        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a mapping.
""" obj = to_format("mapping") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (True if format is not False else False) class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin): """Directly test ``to/from_mapping``.""" def setup_class(self): self.functions = {"to": to_mapping, "from": from_mapping} @pytest.mark.skip("N/A") def test_fromformat_subclass_partial_info_mapping(self): """This test does not apply to the direct functions.""" pass
3a617e60d6d8dbf4b9064399320ec050cd5e5acfa19cf0f233a8dfb1e2cc7c84
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
from astropy.cosmology import Cosmology, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.table import from_table, to_table
from astropy.table import QTable, Table, vstack

from .base import ToFromDirectTestBase, ToFromTestMixinBase

###############################################################################


class ToFromTableTestMixin(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_table_bad_index(self, from_format, to_format):
        """Test if argument ``index`` is incorrect"""
        tbl = to_format("astropy.table")

        # single-row table and has a non-0/None index
        with pytest.raises(IndexError, match="index 2 out of range"):
            from_format(tbl, index=2, format="astropy.table")

        # string index where doesn't match
        with pytest.raises(KeyError, match="No matches found for key"):
            from_format(tbl, index="row 0", format="astropy.table")

    # -----------------------

    def test_to_table_failed_cls(self, to_format):
        """Test failed table type."""
        with pytest.raises(TypeError, match="'cls' must be"):
            to_format('astropy.table', cls=list)

    @pytest.mark.parametrize("tbl_cls", [QTable, Table])
    def test_to_table_cls(self, to_format, tbl_cls):
        tbl = to_format('astropy.table', cls=tbl_cls)
        assert isinstance(tbl, tbl_cls)  # test type

    # -----------------------

    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
        """Test where the cosmology class is placed."""
        tbl = to_format('astropy.table', cosmology_in_meta=in_meta)

        # if it's in metadata, it's not a column. And vice versa.
        if in_meta:
            assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.colnames  # not also a column
        else:
            assert tbl["cosmology"][0] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.meta

    # -----------------------

    def test_to_table(self, cosmo_cls, cosmo, to_format):
        """Test cosmology -> astropy.table."""
        tbl = to_format("astropy.table")

        # Test properties of Table.
        assert isinstance(tbl, QTable)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl["name"] == cosmo.name
        assert tbl.indices  # indexed

        # Test each Parameter column has expected information.
        for n in cosmo.__parameters__:
            P = getattr(cosmo_cls, n)  # Parameter
            col = tbl[n]  # Column

            # Compare the two
            assert col.info.name == P.name
            assert col.info.description == P.__doc__
            assert col.info.format == (None if col[0] is None else P.format_spec)
            assert col.info.meta == (cosmo.meta.get(n) or {})

    # -----------------------

    def test_from_not_table(self, cosmo, from_format):
        """Test not passing a Table to the Table parser."""
        with pytest.raises((TypeError, ValueError)):
            from_format("NOT A TABLE", format="astropy.table")

    def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
        """Test cosmology -> astropy.table -> cosmology."""
        tbl = to_format("astropy.table")

        # add information
        tbl["mismatching"] = "will error"

        # tests are different if the last argument is a **kwarg
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = from_format(tbl, format="astropy.table")

            assert got.__class__ is cosmo_cls
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta

            return  # don't continue testing

        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(tbl, format="astropy.table")

        # unless mismatched are moved to meta
        got = from_format(tbl, format="astropy.table", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"

        # it won't error if everything matches up
        tbl.remove_column("mismatching")
        got = from_format(tbl, format="astropy.table")
        assert got == cosmo

        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``to_format``.
        tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
        got = from_format(tbl, format="astropy.table")
        assert got == cosmo

        # also it auto-identifies 'format'
        got = from_format(tbl)
        assert got == cosmo

    def test_fromformat_table_subclass_partial_info(self, cosmo_cls, cosmo,
                                                    from_format, to_format):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        """
        # test to_format
        tbl = to_format("astropy.table")
        assert isinstance(tbl, QTable)

        # partial information
        tbl.meta.pop("cosmology", None)
        del tbl["Tcmb0"]

        # read with the same class that wrote fills in the missing info with
        # the default value
        got = cosmo_cls.from_format(tbl, format="astropy.table")
        got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
        got3 = from_format(tbl, format="astropy.table",
                           cosmology=cosmo_cls.__qualname__)

        assert (got == got2) and (got2 == got3)  # internal consistency

        # not equal, because Tcmb0 is changed, which also changes m_nu
        assert got != cosmo
        assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
        assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0,
                         m_nu=cosmo.m_nu) == cosmo
        # but the metadata is the same
        assert got.meta == cosmo.meta

    @pytest.mark.parametrize("add_index", [True, False])
    def test_tofrom_table_multirow(self, cosmo_cls, cosmo, from_format, add_index):
        """Test if table has multiple rows."""
        # ------------
        # To Table

        cosmo1 = cosmo.clone(name="row 0")
        cosmo2 = cosmo.clone(name="row 2")
        tbl = vstack([c.to_format("astropy.table")
                      for c in (cosmo1, cosmo, cosmo2)],
                     metadata_conflicts='silent')

        assert isinstance(tbl, QTable)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl[1]["name"] == cosmo.name

        # whether to add an index. `from_format` can work with or without.
        if add_index:
            tbl.add_index("name", unique=True)

        # ------------
        # From Table

        # it will error on a multi-row table
        with pytest.raises(ValueError, match="need to select a specific row"):
            from_format(tbl, format="astropy.table")

        # unless the index argument is provided
        got = from_format(tbl, index=1, format="astropy.table")
        assert got == cosmo

        # the index can be a string
        got = from_format(tbl, index=cosmo.name, format="astropy.table")
        assert got == cosmo

        # when there's more than one cosmology found
        tbls = vstack([tbl, tbl], metadata_conflicts="silent")
        with pytest.raises(ValueError, match="more than one"):
            from_format(tbls, index=cosmo.name, format="astropy.table")

    @pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
    def test_is_equivalent_to_table(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.

        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a |Table|.
        """
        obj = to_format("astropy.table")
        assert not isinstance(obj, Cosmology)

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (True if format is not False else False)


class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
    """Directly test ``to/from_table``."""

    def setup_class(self):
        self.functions = {"to": to_table, "from": from_table}
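
# --- Editor's hedged sketch (addition, not part of the test file above) ---
# Table round trip, including row selection from a multi-row table
# (assuming astropy >= 5.0):
from astropy.cosmology import Cosmology, Planck15, Planck18
from astropy.table import vstack

tbl = vstack([Planck15.to_format("astropy.table"),
              Planck18.to_format("astropy.table")],
             metadata_conflicts="silent")
cosmo = Cosmology.from_format(tbl, index="Planck18", format="astropy.table")
assert cosmo == Planck18
# --- end sketch ---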
2b3709d64454a220fb6ddf60ca978a4b2725d74998368c0749f27fb19306ff95
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# STDLIB
import inspect

# THIRD PARTY
import pytest

# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
from astropy.cosmology import units as cu
from astropy.cosmology.io.yaml import (from_yaml, to_yaml, yaml_constructor,
                                       yaml_representer)
from astropy.io.misc.yaml import AstropyDumper, dump, load
from astropy.table import QTable, vstack

from .base import ToFromDirectTestBase, ToFromTestMixinBase

##############################################################################
# Test Serializer


def test_yaml_representer():
    """Test :func:`~astropy.cosmology.io.yaml.yaml_representer`."""
    # test function `representer`
    representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
    assert callable(representer)

    # test the normal method of dumping to YAML
    yml = dump(Planck18)
    assert isinstance(yml, str)
    assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")


def test_yaml_constructor():
    """Test :func:`~astropy.cosmology.io.yaml.yaml_constructor`."""
    # test function `constructor`
    constructor = yaml_constructor(FlatLambdaCDM)
    assert callable(constructor)

    # it's too hard to manually construct a node, so we only test dump/load
    # this is also a good round-trip test
    yml = dump(Planck18)
    with u.add_enabled_units(cu):  # needed for redshift units
        cosmo = load(yml)
    assert isinstance(cosmo, FlatLambdaCDM)
    assert cosmo == Planck18
    assert cosmo.meta == Planck18.meta


##############################################################################
# Test Unified I/O


class ToFromYAMLTestMixin(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="yaml"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
    """

    @pytest.fixture
    def xfail_if_not_registered_with_yaml(self, cosmo_cls):
        """
        YAML I/O only works on registered classes. So the thing to check is
        if this class is registered. If not, :func:`pytest.xfail` this test.
        Some of the tests define custom cosmologies. They are not registered.
        """
        if cosmo_cls not in AstropyDumper.yaml_representers:
            pytest.xfail(f"Cosmologies of type {cosmo_cls} are not registered with YAML.")

    # ===============================================================

    def test_to_yaml(self, cosmo, to_format, xfail_if_not_registered_with_yaml):
        """Test cosmology -> YAML."""
        yml = to_format('yaml')

        assert isinstance(yml, str)  # test type
        assert yml.startswith("!astropy.cosmology.")

    def test_from_yaml_default(self, cosmo, to_format, from_format,
                               xfail_if_not_registered_with_yaml):
        """Test cosmology -> YAML -> cosmology."""
        yml = to_format('yaml')

        got = from_format(yml, format="yaml")  # (cannot autoidentify)

        assert got.name == cosmo.name
        assert got.meta == cosmo.meta

        # it won't error if everything matches up
        got = from_format(yml, format="yaml")
        assert got == cosmo
        assert got.meta == cosmo.meta

        # auto-identify test moved because it doesn't work.
        # see test_from_yaml_autoidentify

    def test_from_yaml_autoidentify(self, cosmo, to_format, from_format,
                                    xfail_if_not_registered_with_yaml):
        """As a non-path string, it does NOT auto-identify 'format'.

        TODO! this says there should be different types of I/O registries,
        not just hacking object conversion on top of file I/O.
""" assert self.can_autodentify("yaml") is False # Showing the specific error. The str is interpreted as a file location # but is too long a file name. yml = to_format('yaml') with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows from_format(yml) # # TODO! this is a challenging test to write. It's also unlikely to happen. # def test_fromformat_subclass_partial_info_yaml(self, cosmo): # """ # Test writing from an instance and reading from that class. # This works with missing information. # """ # ----------------------------------------------------- @pytest.mark.parametrize("format", [True, False, None]) def test_is_equivalent_to_yaml(self, cosmo, to_format, format, xfail_if_not_registered_with_yaml): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a YAML string. YAML can't be identified without "format" specified. """ obj = to_format("yaml") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is False def test_is_equivalent_to_yaml_specify_format(self, cosmo, to_format, xfail_if_not_registered_with_yaml): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``. """ assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin): """ Directly test ``to/from_yaml``. These are not public API and are discouraged from use, in favor of ``Cosmology.to/from_format(..., format="yaml")``, but should be tested regardless b/c 3rd party packages might use these in their Cosmology I/O. Also, it's cheap to test. """ def setup_class(self): """Set up fixtures to use ``to/from_yaml``, not the I/O abstractions.""" self.functions = {"to": to_yaml, "from": from_yaml} @pytest.fixture(scope="class", autouse=True) def setup(self): """ Setup and teardown for tests. This overrides from super because `ToFromDirectTestBase` adds a custom Cosmology ``CosmologyWithKwargs`` that is not registered with YAML. """ yield # run tests def test_from_yaml_autoidentify(self, cosmo, to_format, from_format): """ If directly calling the function there's no auto-identification. So this overrides the test from `ToFromYAMLTestMixin` """ pass
41205ec476d7a4f40aed14b0d8900a7fe83e0518009912d20d97a543c75fad51
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
import astropy.units as u
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.ecsv import read_ecsv, write_ecsv
from astropy.table import QTable, Table, vstack

from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase

###############################################################################


class ReadWriteECSVTestMixin(ReadWriteTestMixinBase):
    """
    Tests for a Cosmology[Read/Write] with ``format="ascii.ecsv"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_ecsv_bad_index(self, read, write, tmp_path):
        """Test if argument ``index`` is incorrect"""
        fp = tmp_path / "test_to_ecsv_bad_index.ecsv"
        write(fp, format="ascii.ecsv")

        # single-row table and has a non-0/None index
        with pytest.raises(IndexError, match="index 2 out of range"):
            read(fp, index=2, format="ascii.ecsv")

        # string index where doesn't match
        with pytest.raises(KeyError, match="No matches found for key"):
            read(fp, index="row 0", format="ascii.ecsv")

    # -----------------------

    def test_to_ecsv_failed_cls(self, write, tmp_path):
        """Test failed table type."""
        fp = tmp_path / "test_to_ecsv_failed_cls.ecsv"

        with pytest.raises(TypeError, match="'cls' must be"):
            write(fp, format='ascii.ecsv', cls=list)

    @pytest.mark.parametrize("tbl_cls", [QTable, Table])
    def test_to_ecsv_cls(self, write, tbl_cls, tmp_path):
        fp = tmp_path / "test_to_ecsv_cls.ecsv"
        write(fp, format='ascii.ecsv', cls=tbl_cls)

    # -----------------------

    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_ecsv_in_meta(self, cosmo_cls, write, in_meta, tmp_path, add_cu):
        """Test where the cosmology class is placed."""
        fp = tmp_path / "test_to_ecsv_in_meta.ecsv"
        write(fp, format='ascii.ecsv', cosmology_in_meta=in_meta)

        # if it's in metadata, it's not a column. And vice versa.
        tbl = QTable.read(fp)
        if in_meta:
            assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.colnames  # not also a column
        else:
            assert tbl["cosmology"][0] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.meta

    # -----------------------

    def test_readwrite_ecsv_instance(self, cosmo_cls, cosmo, read, write,
                                     tmp_path, add_cu):
        """Test cosmology -> ascii.ecsv -> cosmology."""
        fp = tmp_path / "test_readwrite_ecsv_instance.ecsv"

        # ------------
        # To Table

        write(fp, format="ascii.ecsv")

        # some checks on the saved file
        tbl = QTable.read(fp)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl["name"] == cosmo.name

        # ------------
        # From Table

        tbl["mismatching"] = "will error"
        tbl.write(fp, format="ascii.ecsv", overwrite=True)

        # tests are different if the last argument is a **kwarg
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = read(fp, format="ascii.ecsv")

            assert got.__class__ is cosmo_cls
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta

            return  # don't continue testing

        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            read(fp, format="ascii.ecsv")

        # unless mismatched are moved to meta
        got = read(fp, format="ascii.ecsv", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"

        # it won't error if everything matches up
        tbl.remove_column("mismatching")
        tbl.write(fp, format="ascii.ecsv", overwrite=True)
        got = read(fp, format="ascii.ecsv")
        assert got == cosmo

        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``write``.
        tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
        got = read(fp, format="ascii.ecsv")
        assert got == cosmo

        # also it auto-identifies 'format'
        got = read(fp)
        assert got == cosmo

    def test_readwrite_ecsv_subclass_partial_info(self, cosmo_cls, cosmo, read,
                                                  write, tmp_path, add_cu):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
""" fp = tmp_path / "test_read_ecsv_subclass_partial_info.ecsv" # test write write(fp, format="ascii.ecsv") # partial information tbl = QTable.read(fp) tbl.meta.pop("cosmology", None) del tbl["Tcmb0"] tbl.write(fp, overwrite=True) # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.read(fp, format="ascii.ecsv") got2 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls) got3 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta def test_readwrite_ecsv_mutlirow(self, cosmo, read, write, tmp_path, add_cu): """Test if table has multiple rows.""" fp = tmp_path / "test_readwrite_ecsv_mutlirow.ecsv" # Make cosmo1 = cosmo.clone(name="row 0") cosmo2 = cosmo.clone(name="row 2") tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)], metadata_conflicts='silent') tbl.write(fp, format="ascii.ecsv") # ------------ # From Table # it will error on a multi-row table with pytest.raises(ValueError, match="need to select a specific row"): read(fp, format="ascii.ecsv") # unless the index argument is provided got = read(fp, index=1, format="ascii.ecsv") assert got == cosmo # the index can be a string got = read(fp, index=cosmo.name, format="ascii.ecsv") assert got == cosmo # it's better if the table already has an index # this will be identical to the previous ``got`` tbl.add_index("name") got2 = read(fp, index=cosmo.name, format="ascii.ecsv") assert got2 == cosmo class TestReadWriteECSV(ReadWriteDirectTestBase, ReadWriteECSVTestMixin): """ Directly test ``read/write_ecsv``. These are not public API and are discouraged from use, in favor of ``Cosmology.read/write(..., format="ascii.ecsv")``, but should be tested regardless b/c they are used internally. """ def setup_class(self): self.functions = {"read": read_ecsv, "write": write_ecsv}
c291643b8de475530b5484c007bc63bfa8e96068b2b7bed7a1937a0561918041
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology, Parameter, realizations
from astropy.cosmology import units as cu
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.realizations import available

cosmo_instances = [getattr(realizations, name) for name in available]

##############################################################################


class IOTestBase:
    """Base class for Cosmology I/O tests.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """


class ToFromTestMixinBase(IOTestBase):
    """Tests for a Cosmology[To/From]Format with some ``format``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    @pytest.fixture(scope="class")
    def from_format(self):
        """Convert to Cosmology using ``Cosmology.from_format()``."""
        return Cosmology.from_format

    @pytest.fixture(scope="class")
    def to_format(self, cosmo):
        """Convert Cosmology instance using ``.to_format()``."""
        return cosmo.to_format

    def can_autoidentify(self, format):
        """Check whether a format can auto-identify."""
        return format in Cosmology.from_format.registry._identifiers


class ReadWriteTestMixinBase(IOTestBase):
    """Tests for a Cosmology[Read/Write].

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    @pytest.fixture(scope="class")
    def read(self):
        """Read Cosmology instance using ``Cosmology.read()``."""
        return Cosmology.read

    @pytest.fixture(scope="class")
    def write(self, cosmo):
        """Write Cosmology using ``.write()``."""
        return cosmo.write

    @pytest.fixture
    def add_cu(self):
        """Add :mod:`astropy.cosmology.units` to the enabled units."""
        # TODO! autoenable 'cu' if cosmology is imported?
        with u.add_enabled_units(cu):
            yield


##############################################################################


class IODirectTestBase(IOTestBase):
    """Directly test Cosmology I/O functions.

    These functions are not public API and are discouraged from public use, in
    favor of the I/O methods on |Cosmology|. They are tested b/c they are used
    internally and because some tests for the methods on |Cosmology| don't need
    to be run in the |Cosmology| class's large test matrix.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass.
""" @pytest.fixture(scope="class", autouse=True) def setup(self): """Setup and teardown for tests.""" class CosmologyWithKwargs(Cosmology): Tcmb0 = Parameter(unit=u.K) def __init__(self, Tcmb0=0, name="cosmology with kwargs", meta=None, **kwargs): super().__init__(name=name, meta=meta) self._Tcmb0 = Tcmb0 << u.K yield # run tests # pop CosmologyWithKwargs from registered classes # but don't error b/c it can fail in parallel _COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None) @pytest.fixture(scope="class", params=cosmo_instances) def cosmo(self, request): """Cosmology instance.""" if isinstance(request.param, str): # CosmologyWithKwargs return _COSMOLOGY_CLASSES[request.param](Tcmb0=3) return request.param @pytest.fixture(scope="class") def cosmo_cls(self, cosmo): """Cosmology classes.""" return cosmo.__class__ class ToFromDirectTestBase(IODirectTestBase, ToFromTestMixinBase): """Directly test ``to/from_<format>``. These functions are not public API and are discouraged from public use, in favor of ``Cosmology.to/from_format(..., format="<format>")``. They are tested because they are used internally and because some tests for the methods on |Cosmology| don't need to be run in the |Cosmology| class's large test matrix. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses should have an attribute ``functions`` which is a dictionary containing two items: ``"to"=<function for to_format>`` and ``"from"=<function for from_format>``. """ @pytest.fixture(scope="class") def from_format(self): """Convert to Cosmology using function ``from``.""" def use_from_format(*args, **kwargs): kwargs.pop("format", None) # specific to Cosmology.from_format return self.functions["from"](*args, **kwargs) return use_from_format @pytest.fixture(scope="class") def to_format(self, cosmo): """Convert Cosmology to format using function ``to``.""" def use_to_format(*args, **kwargs): return self.functions["to"](cosmo, *args, **kwargs) return use_to_format class ReadWriteDirectTestBase(IODirectTestBase, ToFromTestMixinBase): """Directly test ``read/write_<format>``. These functions are not public API and are discouraged from public use, in favor of ``Cosmology.read/write(..., format="<format>")``. They are tested because they are used internally and because some tests for the methods on |Cosmology| don't need to be run in the |Cosmology| class's large test matrix. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses should have an attribute ``functions`` which is a dictionary containing two items: ``"read"=<function for read>`` and ``"write"=<function for write>``. """ @pytest.fixture(scope="class") def read(self): """Read Cosmology from file using function ``read``.""" def use_read(*args, **kwargs): kwargs.pop("format", None) # specific to Cosmology.from_format return self.functions["read"](*args, **kwargs) return use_read @pytest.fixture(scope="class") def write(self, cosmo): """Write Cosmology to file using function ``write``.""" def use_write(*args, **kwargs): return self.functions["write"](cosmo, *args, **kwargs) return use_write
1fea6bb2cc37412ed918e0f5124e80a144ea652d106511960826f96366cfb9f2
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# THIRD PARTY
import pytest

# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.cosmology.io.row import from_row, to_row
from astropy.table import Row

from .base import ToFromDirectTestBase, ToFromTestMixinBase

###############################################################################


class ToFromRowTestMixin(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="astropy.row"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
    """

    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_row_in_meta(self, cosmo_cls, cosmo, in_meta):
        """Test where the cosmology class is placed."""
        row = cosmo.to_format('astropy.row', cosmology_in_meta=in_meta)

        # if it's in metadata, it's not a column. And vice versa.
        if in_meta:
            assert row.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in row.colnames  # not also a column
        else:
            assert row["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in row.meta

    # -----------------------

    def test_from_not_row(self, cosmo, from_format):
        """Test not passing a Row to the Row parser."""
        with pytest.raises(AttributeError):
            from_format("NOT A ROW", format="astropy.row")

    def test_tofrom_row_instance(self, cosmo, to_format, from_format):
        """Test cosmology -> astropy.row -> cosmology."""
        # ------------
        # To Row

        row = to_format("astropy.row")
        assert isinstance(row, Row)
        assert row["cosmology"] == cosmo.__class__.__qualname__
        assert row["name"] == cosmo.name

        # ------------
        # From Row

        row.table["mismatching"] = "will error"

        # tests are different if the last argument is a **kwarg
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = from_format(row, format="astropy.row")

            assert got.__class__ is cosmo.__class__
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta

            return  # don't continue testing

        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(row, format="astropy.row")

        # unless mismatched are moved to meta
        got = from_format(row, format="astropy.row", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"

        # it won't error if everything matches up
        row.table.remove_column("mismatching")
        got = from_format(row, format="astropy.row")
        assert got == cosmo

        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``to_format``.
        cosmology = _COSMOLOGY_CLASSES[row["cosmology"]]
        row.table.remove_column("cosmology")
        row.table["cosmology"] = cosmology
        got = from_format(row, format="astropy.row")
        assert got == cosmo

        # also it auto-identifies 'format'
        got = from_format(row)
        assert got == cosmo

    def test_fromformat_row_subclass_partial_info(self, cosmo):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        """
        pass  # there are no partial info options

    @pytest.mark.parametrize("format", [True, False, None, "astropy.row"])
    def test_is_equivalent_to_row(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a Row.
        """
        obj = to_format("astropy.row")
        assert not isinstance(obj, Cosmology)

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (True if format is not False else False)


class TestToFromRow(ToFromDirectTestBase, ToFromRowTestMixin):
    """
    Directly test ``to/from_row``. These are not public API and are
    discouraged from use, in favor of ``Cosmology.to/from_format(...,
    format="astropy.row")``, but should be tested regardless b/c 3rd party
    packages might use these in their Cosmology I/O. Also, it's cheap to test.
    """

    def setup_class(self):
        self.functions = {"to": to_row, "from": from_row}
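
# --- Editor's hedged sketch (addition, not part of the test file above) ---
# Row round trip (assuming astropy >= 5.0): a Row is a single line of the
# table representation, and `from_format` auto-identifies it.
from astropy.cosmology import Cosmology, Planck18

row = Planck18.to_format("astropy.row")
assert row["name"] == "Planck18"
cosmo = Cosmology.from_format(row)  # auto-identifies 'format'
assert cosmo == Planck18
# --- end sketch ---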
1c4b5cdb71c97bd8fe82298cb31a2767f0324a342f2dab403e8bba98f72f322e
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# STDLIB
import json
import os

# THIRD PARTY
import pytest

# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology

from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase

###############################################################################


def read_json(filename, **kwargs):
    """Read JSON.

    Parameters
    ----------
    filename : str
    **kwargs
        Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`

    Returns
    -------
    `~astropy.cosmology.Cosmology` instance
    """
    # read
    if isinstance(filename, (str, bytes, os.PathLike)):
        with open(filename, "r") as file:
            data = file.read()
    else:  # file-like : this also handles errors in dumping
        data = filename.read()

    mapping = json.loads(data)  # parse json mappable to dict

    # deserialize Quantity
    with u.add_enabled_units(cu.redshift):
        for k, v in mapping.items():
            if isinstance(v, dict) and "value" in v and "unit" in v:
                mapping[k] = u.Quantity(v["value"], v["unit"])
        for k, v in mapping.get("meta", {}).items():  # also the metadata
            if isinstance(v, dict) and "value" in v and "unit" in v:
                mapping["meta"][k] = u.Quantity(v["value"], v["unit"])

    return Cosmology.from_format(mapping, format="mapping", **kwargs)


def write_json(cosmology, file, *, overwrite=False):
    """Write Cosmology to JSON.

    Parameters
    ----------
    cosmology : `astropy.cosmology.Cosmology` subclass instance
    file : path-like or file-like
    overwrite : bool (optional, keyword-only)
    """
    data = cosmology.to_format("mapping")  # start by turning into dict
    data["cosmology"] = data["cosmology"].__qualname__

    # serialize Quantity
    for k, v in data.items():
        if isinstance(v, u.Quantity):
            data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
    for k, v in data.get("meta", {}).items():  # also serialize the metadata
        if isinstance(v, u.Quantity):
            data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}

    # check that file exists and whether to overwrite.
    if os.path.exists(file) and not overwrite:
        raise IOError(f"{file} exists. Set 'overwrite' to write over.")
    with open(file, "w") as write_file:
        json.dump(data, write_file)


def json_identify(origin, filepath, fileobj, *args, **kwargs):
    return filepath is not None and filepath.endswith(".json")


###############################################################################


class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
    """
    Tests for a Cosmology[Read/Write] with ``format="json"``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
""" @pytest.fixture(scope="class", autouse=True) def register_and_unregister_json(self): """Setup & teardown for JSON read/write tests.""" # Register readwrite_registry.register_reader("json", Cosmology, read_json, force=True) readwrite_registry.register_writer("json", Cosmology, write_json, force=True) readwrite_registry.register_identifier("json", Cosmology, json_identify, force=True) yield # Run all tests in class # Unregister readwrite_registry.unregister_reader("json", Cosmology) readwrite_registry.unregister_writer("json", Cosmology) readwrite_registry.unregister_identifier("json", Cosmology) # ======================================================================== def test_readwrite_json_subclass_partial_info(self, cosmo_cls, cosmo, read, write, tmp_path, add_cu): """ Test writing from an instance and reading from that class. This works with missing information. """ fp = tmp_path / "test_readwrite_json_subclass_partial_info.json" # test write cosmo.write(fp, format="json") # partial information with open(fp, "r") as file: L = file.readlines()[0] L = L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :] # remove cosmology i = L.index('"Tcmb0":') # delete Tcmb0 L = L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :] # second occurence tempfname = tmp_path / f"{cosmo.name}_temp.json" with open(tempfname, "w") as file: file.writelines([L]) # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.read(tempfname, format="json") got2 = read(tempfname, format="json", cosmology=cosmo_cls) got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin): """ Directly test ``read/write_json``. These are not public API and are discouraged from use, in favor of ``Cosmology.read/write(..., format="json")``, but should be tested regardless b/c they are used internally. """ def setup_class(self): self.functions = {"read": read_json, "write": write_json}
# Licensed under a 3-clause BSD style license - see LICENSE.rst # THIRD PARTY import pytest # LOCAL from astropy.cosmology import Cosmology from astropy.cosmology.io.cosmology import from_cosmology, to_cosmology from .base import ToFromTestMixinBase, IODirectTestBase ############################################################################### class ToFromCosmologyTestMixin(ToFromTestMixinBase): """ Tests for a Cosmology[To/From]Format with ``format="astropy.cosmology"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ def test_to_cosmology_default(self, cosmo, to_format): """Test cosmology -> cosmology.""" newcosmo = to_format("astropy.cosmology") assert newcosmo is cosmo def test_from_not_cosmology(self, cosmo, from_format): """Test incorrect type in ``Cosmology``.""" with pytest.raises(TypeError): from_format("NOT A COSMOLOGY", format="astropy.cosmology") def test_from_cosmology_default(self, cosmo, from_format): """Test cosmology -> cosmology.""" newcosmo = from_format(cosmo) assert newcosmo is cosmo @pytest.mark.parametrize("format", [True, False, None, "astropy.cosmology"]) def test_is_equivalent_to_cosmology(self, cosmo, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a Cosmology! Since it's the identity conversion, the cosmology is always equivalent to itself, regardless of ``format``. """ obj = to_format("astropy.cosmology") assert obj is cosmo is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is True # equivalent to self class TestToFromCosmology(IODirectTestBase, ToFromCosmologyTestMixin): """Directly test ``to/from_cosmology``.""" def setup_class(self): self.functions = {"to": to_cosmology, "from": from_cosmology}
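# Quick illustration (grounded in the tests above): the "astropy.cosmology"
# format is the identity conversion, so both directions return the very same
# object.
#
#     >>> from astropy.cosmology import Cosmology, Planck18
#     >>> Planck18.to_format("astropy.cosmology") is Planck18
#     True
#     >>> Cosmology.from_format(Planck18, format="astropy.cosmology") is Planck18
#     True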
# -*- coding: utf-8 -*-

"""Cosmology classes and instances for package ``mypackage``."""

# STDLIB
from collections import UserDict

# THIRD PARTY
import numpy as np


class MyCosmology(UserDict):
    """Cosmology, from ``mypackage``.

    Parameters
    ----------
    **kw
        values for `MyCosmology`.
    """

    def __init__(self, **kw):
        super().__init__()
        self.update(kw)

    def __getattr__(self, key):
        """Get attributes like items."""
        if key in self:
            return self[key]
        return super().__getattribute__(key)

    def age_in_Gyr(self, z):
        """Cosmology age in Gyr. Returns `NotImplemented`."""
        return NotImplemented

    def __eq__(self, other):
        """Equality check."""
        if not isinstance(other, MyCosmology):
            return NotImplemented
        return all(np.all(v == other[k]) for k, v in self.items())


myplanck = MyCosmology(
    name="planck",
    hubble_parameter=67.66,  # km/s/Mpc
    initial_dm_density=0.2607,  # X/rho_critical
    initial_baryon_density=0.04897,  # X/rho_critical
    initial_matter_density=0.30966,  # X/rho_critical
    n=0.9665,
    sigma8=0.8102,
    z_reionization=7.82,  # redshift
    current_age=13.787,  # Gyr
    initial_temperature=2.7255,  # Kelvin
    Neff=3.046,
    neutrino_masses=[0., 0., 0.06],  # eV
)
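# Usage sketch (illustrative): ``MyCosmology`` is a dict whose keys are also
# exposed as attributes via ``__getattr__``, so the realization above can be
# queried either way.
#
#     >>> myplanck["hubble_parameter"]
#     67.66
#     >>> myplanck.hubble_parameter
#     67.66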
from . import cosmology, io
# -*- coding: utf-8 -*-

# STDLIB
import json
import os

# THIRD PARTY
import numpy as np

# LOCAL
from mypackage.cosmology import MyCosmology


def file_reader(filename):
    """Read files in format 'myformat'.

    Parameters
    ----------
    filename : str, bytes, or `~os.PathLike`

    Returns
    -------
    `~mypackage.cosmology.MyCosmology` instance
    """
    # read
    if isinstance(filename, (str, bytes, os.PathLike)):
        with open(filename, "r") as file:
            data = file.read()
    else:  # file-like : this also handles errors in dumping
        data = filename.read()

    mapping = json.loads(data)  # parse json mappable to dict

    # deserialize list to ndarray
    for k, v in mapping.items():
        if isinstance(v, list):
            mapping[k] = np.array(v)

    return MyCosmology(**mapping)


def file_writer(file, cosmo, *, overwrite=False):
    """Write files in format 'myformat'.

    Parameters
    ----------
    file : str, bytes, `~os.PathLike`, or file-like
    cosmo : `~mypackage.cosmology.MyCosmology` instance
    overwrite : bool (optional, keyword-only)
        Whether to overwrite an existing file. Default is False.
    """
    output = dict(cosmo)

    # serialize ndarray
    for k, v in output.items():
        if isinstance(v, np.ndarray):
            output[k] = v.tolist()

    # write
    if isinstance(file, (str, bytes, os.PathLike)):
        # check that file exists and whether to overwrite.
        if os.path.exists(file) and not overwrite:
            raise IOError(f"{file} exists. Set 'overwrite' to write over.")
        with open(file, "w") as write_file:
            json.dump(output, write_file)
    else:  # file-like
        json.dump(output, file)
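# Round-trip sketch (illustrative; the file name is hypothetical): writing with
# ``file_writer`` and reading back with ``file_reader`` reproduces the mapping,
# with any ndarray values serialized through JSON lists.
#
#     >>> from mypackage.cosmology import myplanck
#     >>> file_writer("myplanck.myformat", myplanck, overwrite=True)
#     >>> file_reader("myplanck.myformat") == myplanck
#     True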
# -*- coding: utf-8 -*- """ Readers, Writers, and I/O Miscellany. """ __all__ = ["file_reader", "file_writer"] # from `mypackage` # e.g. a file reader and writer for ``myformat`` # this will be used in ``astropy_io.py`` from .core import file_reader, file_writer # Register read and write methods into Astropy: # determine if it is 1) installed and 2) the correct version (v5.0+) try: import astropy from astropy.utils.introspection import minversion except ImportError: ASTROPY_GE_5 = False else: ASTROPY_GE_5 = minversion(astropy, "5.0") if ASTROPY_GE_5: # Astropy is installed and v5.0+ so we import the following modules # to register "myformat" with Cosmology read/write and "mypackage" # with Cosmology to/from_format. from . import astropy_convert, astropy_io
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see Astropy LICENSE.rst """ Register conversion methods for cosmology objects with Astropy Cosmology. With this registered, we can start with a Cosmology from ``mypackage`` and convert it to an astropy Cosmology instance. >>> from mypackage.cosmology import myplanck >>> from astropy.cosmology import Cosmology >>> cosmo = Cosmology.from_format(myplanck, format="mypackage") >>> cosmo We can also do the reverse: start with an astropy Cosmology and convert it to a ``mypackage`` object. >>> from astropy.cosmology import Planck18 >>> myplanck = Planck18.to_format("mypackage") >>> myplanck """ # THIRD PARTY import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import FLRW, Cosmology, FlatLambdaCDM from astropy.cosmology.connect import convert_registry # LOCAL from mypackage.cosmology import MyCosmology __doctest_skip__ = ['*'] def from_mypackage(mycosmo): """Load `~astropy.cosmology.Cosmology` from ``mypackage`` object. Parameters ---------- mycosmo : `~mypackage.cosmology.MyCosmology` Returns ------- `~astropy.cosmology.Cosmology` """ m = dict(mycosmo) m["name"] = mycosmo.name # ---------------- # remap Parameters m["H0"] = m.pop("hubble_parameter") * (u.km / u.s / u.Mpc) m["Om0"] = m.pop("initial_matter_density") m["Tcmb0"] = m.pop("initial_temperature") * u.K # m["Neff"] = m.pop("Neff") # skip b/c unchanged m["m_nu"] = m.pop("neutrino_masses") * u.eV m["Ob0"] = m.pop("initial_baryon_density") # ---------------- # remap metadata m["t0"] = m.pop("current_age") * u.Gyr # optional if "reionization_redshift" in m: m["z_reion"] = m.pop("reionization_redshift") # ... # keep building `m` # ---------------- # Detect which type of Astropy cosmology to build. # TODO! CUSTOMIZE FOR DETECTION # Here we just force FlatLambdaCDM, but if your package allows for # non-flat cosmologies... m["cosmology"] = FlatLambdaCDM # build cosmology return Cosmology.from_format(m, format="mapping", move_to_meta=True) def to_mypackage(cosmology, *args): """Return the cosmology as a ``mycosmo``. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` Returns ------- `~mypackage.cosmology.MyCosmology` """ if not isinstance(cosmology, FLRW): raise TypeError("format 'mypackage' only supports FLRW cosmologies.") # ---------------- # Cosmology provides a nice method "mapping", so all that needs to # be done here is initialize from the dictionary m = cosmology.to_format("mapping") # Detect which type of MyCosmology to build. # Here we have forced FlatLambdaCDM, but if your package allows for # non-flat cosmologies... m.pop("cosmology") # MyCosmology doesn't support metadata. If your cosmology class does... meta = m.pop("meta") m = {**meta, **m} # merge, preferring current values # ---------------- # remap values # MyCosmology doesn't support units, so take values. m["hubble_parameter"] = m.pop("H0").to_value(u.km/u.s/u.Mpc) m["initial_matter_density"] = m.pop("Om0") m["initial_temperature"] = m.pop("Tcmb0").to_value(u.K) # m["Neff"] = m.pop("Neff") # skip b/c unchanged m["neutrino_masses"] = m.pop("m_nu").to_value(u.eV) m["initial_baryon_density"] = m.pop("Ob0") m["current_age"] = m.pop("t0", cosmology.age(0 * cu.redshift)).to_value(u.Gyr) # optional if "z_reion" in m: m["reionization_redshift"] = (m.pop("z_reion") << cu.redshift).value # ... 
# keep remapping return MyCosmology(**m) def mypackage_identify(origin, format, *args, **kwargs): """Identify if object uses format "mypackage".""" itis = False if origin == "read": itis = isinstance(args[1], MyCosmology) and (format in (None, "mypackage")) return itis # ------------------------------------------------------------------- # Register to/from_format & identify methods with Astropy Unified I/O convert_registry.register_reader("mypackage", Cosmology, from_mypackage, force=True) convert_registry.register_writer("mypackage", Cosmology, to_mypackage, force=True) convert_registry.register_identifier("mypackage", Cosmology, mypackage_identify, force=True)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see Astropy LICENSE.rst

"""
Register Read/Write methods for "myformat" (JSON) with Astropy Cosmology.

With this format registered, we can start with a Cosmology from
``mypackage``, write it to a file, and read it with Astropy to create an
astropy Cosmology instance.

    >>> from mypackage.cosmology import myplanck
    >>> from mypackage.io import file_writer
    >>> file_writer('<file name>', myplanck)

    >>> from astropy.cosmology import Cosmology
    >>> cosmo = Cosmology.read('<file name>', format="myformat")
    >>> cosmo

We can also do the reverse: start with an astropy Cosmology, save it and
read it with ``mypackage``.

    >>> from astropy.cosmology import Planck18
    >>> Planck18.write('<file name>', format="myformat")

    >>> from mypackage.io import file_reader
    >>> cosmo2 = file_reader('<file name>')
    >>> cosmo2

"""

# STDLIB
import json
import os

# THIRD PARTY
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.connect import readwrite_registry

# LOCAL
from .core import file_reader, file_writer

__doctest_skip__ = ['*']


def read_myformat(filename, **kwargs):
    """Read files in format 'myformat'.

    Parameters
    ----------
    filename : str
    **kwargs
        Keyword arguments into `astropy.cosmology.Cosmology.from_format`
        with ``format="mypackage"``.

    Returns
    -------
    `~astropy.cosmology.Cosmology` instance
    """
    mycosmo = file_reader(filename)  # ← read file ↓ build Cosmology
    return Cosmology.from_format(mycosmo, format="mypackage", **kwargs)


def write_myformat(cosmology, file, *, overwrite=False):
    """Write files in format 'myformat'.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology` instance
    file : str, bytes, or `~os.PathLike`
    overwrite : bool (optional, keyword-only)
        Whether to overwrite an existing file. Default is False.
    """
    cosmo = cosmology.to_format("mypackage")  # ← convert Cosmology ↓ write file
    file_writer(file, cosmo, overwrite=overwrite)


def myformat_identify(origin, filepath, fileobj, *args, **kwargs):
    """Identify if object uses ``myformat`` (JSON)."""
    return filepath is not None and filepath.endswith(".myformat")


# -------------------------------------------------------------------
# Register read/write/identify methods with Astropy Unified I/O

readwrite_registry.register_reader("myformat", Cosmology, read_myformat, force=True)
readwrite_registry.register_writer("myformat", Cosmology, write_myformat, force=True)
readwrite_registry.register_identifier("myformat", Cosmology, myformat_identify, force=True)
# Licensed under a 3-clause BSD style license - see LICENSE.rst # THIRD PARTY import pytest # LOCAL from mypackage.io import ASTROPY_GE_5 @pytest.fixture(scope="session", autouse=True) def teardown_mypackage(): """Clean up module after tests.""" yield # to let all tests within the scope run if ASTROPY_GE_5: from astropy.cosmology import Cosmology from astropy.cosmology.connect import convert_registry, readwrite_registry readwrite_registry.unregister_reader("myformat", Cosmology) readwrite_registry.unregister_writer("myformat", Cosmology) readwrite_registry.unregister_identifier("myformat", Cosmology) convert_registry.unregister_reader("mypackage", Cosmology) convert_registry.unregister_writer("mypackage", Cosmology) convert_registry.unregister_identifier("mypackage", Cosmology)
# -*- coding: utf-8 -*-

"""
Test that the conversion interface with Astropy works as expected.
"""

# STDLIB
import os

# THIRD PARTY
import pytest

# LOCAL
from mypackage.cosmology import MyCosmology, myplanck

# skip all tests in module if import is missing
astropy = pytest.importorskip("astropy", minversion="4.3")  # isort: skip
# can now import freely from astropy
from astropy import cosmology
from astropy.cosmology import Cosmology
from astropy.io import registry as io_registry
from astropy.utils.compat.optional_deps import HAS_SCIPY

# cosmology instances to test reading and writing
astropy_cosmos = [
    getattr(cosmology.realizations, n) for n in cosmology.realizations.available
]

mypackage_cosmos = [myplanck]  # list of ``mypackage`` cosmology realizations


@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy.")
class TestAstropyCosmologyConvert:
    """Test conversion to/from Astropy."""

    @pytest.mark.parametrize("expected", astropy_cosmos)
    def test_roundtrip_from_astropy(self, expected):
        # convert to ``mypackage``
        mycosmo = expected.to_format("mypackage")

        # Read back
        got = Cosmology.from_format(mycosmo, format="mypackage")

        # test round-tripped as expected
        assert got == expected  # tests immutable parameters, e.g. H0

        # NOTE: if your package's cosmology supports metadata
        # assert got.meta == expected.meta  # (metadata not tested above)

    @pytest.mark.parametrize("expected", mypackage_cosmos)
    def test_roundtrip_from_mypackage(self, expected):
        # convert to Astropy
        acosmo = Cosmology.from_format(expected, format="mypackage")

        # convert back to ``mypackage``
        got = acosmo.to_format("mypackage")

        # test round-tripped as expected
        assert isinstance(got, MyCosmology)
        assert got == expected  # assuming ``MyCosmology`` has an __eq__ method

        ...  # more equality tests

    @pytest.mark.parametrize("acosmo", astropy_cosmos)
    def test_package_equality(self, acosmo):
        """
        The most important test: are the ``mypackage`` and ``astropy``
        cosmologies equivalent!? They should be if read from the same source
        file.
        """
        # convert to ``mypackage``
        mycosmo = acosmo.to_format("mypackage")

        # ------------
        # test that the mypackage and astropy cosmologies are equivalent

        assert mycosmo.name == acosmo.name
        assert mycosmo.hubble_parameter == acosmo.H0.value

        ...  # continue with the tests

        # e.g. test some methods
        # assert mycosmo.age_in_Gyr(1100) == acosmo.age(1100).to_value(u.Gyr)
# -*- coding: utf-8 -*-

"""
Test that the interface with Astropy works as expected.
"""

# STDLIB
import os

# THIRD PARTY
import pytest

# LOCAL
from mypackage.cosmology import myplanck
from mypackage.io import file_reader

# skip all tests in module if import is missing
astropy = pytest.importorskip("astropy", minversion="4.3")  # isort: skip
# can now import freely from astropy
import astropy.units as u
from astropy import cosmology
from astropy.cosmology import Cosmology
from astropy.io import registry as io_registry
from astropy.utils.compat.optional_deps import HAS_SCIPY

# the formats being registered with Astropy
readwrite_formats = ["myformat"]

# cosmology instances to test reading and writing
astropy_cosmos = cosmology.realizations.available

# list of ``mypackage`` cosmology realizations
mypackage_cosmos = [myplanck]


@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy.")
class TestAstropyCosmologyIO:
    """Test read/write interoperability with Astropy."""

    @pytest.mark.parametrize("format", readwrite_formats)
    @pytest.mark.parametrize("instance", astropy_cosmos)
    def test_roundtrip_from_astropy(self, tmp_path, instance, format):
        cosmo = getattr(cosmology.realizations, instance)
        fname = tmp_path / f"{instance}.{format}"

        # write to file
        cosmo.write(str(fname), format=format)

        # also test kwarg "overwrite"
        assert os.path.exists(str(fname))  # file exists
        with pytest.raises(IOError):
            cosmo.write(str(fname), format=format, overwrite=False)

        assert os.path.exists(str(fname))  # overwrite the existing file
        cosmo.write(str(fname), format=format, overwrite=True)

        # Read back
        got = Cosmology.read(fname, format=format)

        # test round-tripped as expected
        assert got == cosmo  # tests immutable parameters, e.g. H0

        # NOTE: if your package's cosmology supports metadata
        # assert got.meta == expected.meta  # (metadata not tested above)

    @pytest.mark.parametrize("format", readwrite_formats)
    @pytest.mark.parametrize("instance", astropy_cosmos)
    def test_package_equality(self, tmp_path, instance, format):
        """
        The most important test: are the ``mypackage`` and ``astropy``
        cosmologies equivalent!? They should be if read from the same source
        file.
        """
        original = getattr(cosmology.realizations, instance)
        fname = tmp_path / f"{instance}.{format}"

        # write with Astropy
        original.write(str(fname), format=format)

        # Read back with ``myformat``
        cosmo = file_reader(str(fname))  # read to instance from mypackage
        # and a read comparison from Astropy
        cosmo2 = cosmology.Cosmology.read(str(fname), format=format)

        # ------------
        # test that the mypackage and astropy cosmologies are equivalent

        assert original.H0.value == cosmo.hubble_parameter
        assert cosmo2.H0.value == cosmo.hubble_parameter

        ...  # continue with the tests

        # e.g. test some methods
        # assert original.age(1100).to_value(u.Gyr) == cosmo.age_in_Gyr(1100)
        # assert cosmo2.age(1100) == cosmo.age(1100)
# Download an example FITS file, create a 2D cutout, and save it to a # new FITS file, including the updated cutout WCS. from astropy.io import fits from astropy.nddata import Cutout2D from astropy.utils.data import download_file from astropy.wcs import WCS def download_image_save_cutout(url, position, size): # Download the image filename = download_file(url) # Load the image and the WCS hdu = fits.open(filename)[0] wcs = WCS(hdu.header) # Make the cutout, including the WCS cutout = Cutout2D(hdu.data, position=position, size=size, wcs=wcs) # Put the cutout image in the FITS HDU hdu.data = cutout.data # Update the FITS header with the cutout WCS hdu.header.update(cutout.wcs.to_header()) # Write the cutout to a new FITS file cutout_filename = 'example_cutout.fits' hdu.writeto(cutout_filename, overwrite=True) if __name__ == '__main__': url = 'https://astropy.stsci.edu/data/photometry/spitzer_example_image.fits' position = (500, 300) size = (400, 400) download_image_save_cutout(url, position, size)
# Load the WCS information from a fits header, and use it # to convert pixel coordinates to world coordinates. import numpy as np from astropy import wcs from astropy.io import fits import sys def load_wcs_from_file(filename): # Load the FITS hdulist using astropy.io.fits hdulist = fits.open(filename) # Parse the WCS keywords in the primary HDU w = wcs.WCS(hdulist[0].header) # Print out the "name" of the WCS, as defined in the FITS header print(w.wcs.name) # Print out all of the settings that were parsed from the header w.wcs.print_contents() # Three pixel coordinates of interest. # Note we've silently assumed an NAXIS=2 image here. # The pixel coordinates are pairs of [X, Y]. # The "origin" argument indicates whether the input coordinates # are 0-based (as in Numpy arrays) or # 1-based (as in the FITS convention, for example coordinates # coming from DS9). pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=np.float64) # Convert pixel coordinates to world coordinates # The second argument is "origin" -- in this case we're declaring we # have 0-based (Numpy-like) coordinates. world = w.wcs_pix2world(pixcrd, 0) print(world) # Convert the same coordinates back to pixel coordinates. pixcrd2 = w.wcs_world2pix(world, 0) print(pixcrd2) # These should be the same as the original pixel coordinates, modulo # some floating-point error. assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6 # The example below illustrates the use of "origin" to convert between # 0- and 1- based coordinates when executing the forward and backward # WCS transform. x = 0 y = 0 origin = 0 assert (w.wcs_pix2world(x, y, origin) == w.wcs_pix2world(x + 1, y + 1, origin + 1)) if __name__ == '__main__': load_wcs_from_file(sys.argv[-1])
# Set the WCS information manually by setting properties of the WCS # object. import numpy as np from astropy import wcs from astropy.io import fits # Create a new WCS object. The number of axes must be set # from the start w = wcs.WCS(naxis=2) # Set up an "Airy's zenithal" projection # Vector properties may be set with Python lists, or Numpy arrays w.wcs.crpix = [-234.75, 8.3393] w.wcs.cdelt = np.array([-0.066667, 0.066667]) w.wcs.crval = [0, -90] w.wcs.ctype = ["RA---AIR", "DEC--AIR"] w.wcs.set_pv([(2, 1, 45.0)]) # Three pixel coordinates of interest. # The pixel coordinates are pairs of [X, Y]. # The "origin" argument indicates whether the input coordinates # are 0-based (as in Numpy arrays) or # 1-based (as in the FITS convention, for example coordinates # coming from DS9). pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=np.float64) # Convert pixel coordinates to world coordinates. # The second argument is "origin" -- in this case we're declaring we # have 0-based (Numpy-like) coordinates. world = w.wcs_pix2world(pixcrd, 0) print(world) # Convert the same coordinates back to pixel coordinates. pixcrd2 = w.wcs_world2pix(world, 0) print(pixcrd2) # These should be the same as the original pixel coordinates, modulo # some floating-point error. assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6 # The example below illustrates the use of "origin" to convert between # 0- and 1- based coordinates when executing the forward and backward # WCS transform. x = 0 y = 0 origin = 0 assert (w.wcs_pix2world(x, y, origin) == w.wcs_pix2world(x + 1, y + 1, origin + 1)) # Now, write out the WCS object as a FITS header header = w.to_header() # header is an astropy.io.fits.Header object. We can use it to create a new # PrimaryHDU and write it to a file. hdu = fits.PrimaryHDU(header=header) # Save to FITS file # hdu.writeto('test.fits')
# Define the astropy.wcs.WCS object using a Python dictionary as input import astropy.wcs wcs_dict = { 'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10, 'NAXIS1': 5, 'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5, 'NAXIS2': 4, 'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 3} input_wcs = astropy.wcs.WCS(wcs_dict)
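# A quick follow-up check (illustrative): the 3D WCS built above implements
# the shared Astropy WCS API, so pixel coordinates can be converted to world
# values in the (wavelength, latitude, longitude) axis order defined by the
# header keywords.
print(input_wcs.pixel_to_world_values(0, 0, 0))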
# NOTE: this hook should be added to # https://github.com/pyinstaller/pyinstaller-hooks-contrib # once that repository is ready for pull requests from PyInstaller.utils.hooks import collect_data_files datas = collect_data_files('skyfield')
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.

import os
import sys
import configparser
from datetime import datetime
from importlib import metadata

import doctest
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet

# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires('astropy'):
    if 'extra == "docs"' in line:
        req = Requirement(line.split(';')[0])
        req_package = req.name.lower()
        req_specifier = str(req.specifier)

        try:
            version = metadata.version(req_package)
        except metadata.PackageNotFoundError:
            missing_requirements[req_package] = req_specifier
            continue  # cannot version-check a package that is not installed

        if version not in SpecifierSet(req_specifier, prereleases=True):
            missing_requirements[req_package] = req_specifier

if missing_requirements:
    print('The following packages could not be found and are required to '
          'build the documentation:')
    for key, val in missing_requirements.items():
        print(f'    * {key} {val}')
    print('Please install the "docs" requirements.')
    sys.exit(1)

from sphinx_astropy.conf.v1 import *  # noqa

# -- Plot configuration -------------------------------------------------------
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'

# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1")  # noqa: F405

# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']  # noqa: F405

# add any custom intersphinx for astropy
intersphinx_mapping['astropy-dev'] = ('https://docs.astropy.org/en/latest/', None)  # noqa: F405
intersphinx_mapping['pyerfa'] = ('https://pyerfa.readthedocs.io/en/stable/', None)  # noqa: F405
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None)  # noqa: F405
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None)  # noqa: F405
intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None)  # noqa: F405, E501
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)  # noqa: F405, E501
intersphinx_mapping['packagetemplate'] = ('https://docs.astropy.org/projects/package-template/en/latest/', None)  # noqa: F405, E501
intersphinx_mapping['h5py'] = ('https://docs.h5py.org/en/stable/', None)  # noqa: F405
intersphinx_mapping['asdf-astropy'] = ('https://asdf-astropy.readthedocs.io/en/latest/', None)  # noqa: F405

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')  # noqa: F405
exclude_patterns.append('changes')  # noqa: F405
exclude_patterns.append('_pkgtemplate.rst')  # noqa: F405
exclude_patterns.append('**/*.inc.rst')  # .inc.rst means *include* files, don't have sphinx process them  # noqa: F405, E501

# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals():  # in case parent conf.py defines it
    templates_path = []
templates_path.append('_templates')

extensions += ["sphinx_changelog"]  # noqa: F405

# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, 'setup.cfg'))
__minimum_python_version__ = setup_cfg['options']['python_requires'].replace('>=', '')

project = u'Astropy'

min_versions = {}
for line in metadata.requires('astropy'):
    req = Requirement(line.split(';')[0])
    min_versions[req.name.lower()] = str(req.specifier)

# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt", "r") as cl:
    rst_epilog += cl.read().format(minimum_python=__minimum_python_version__,
                                   **min_versions)

# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT')
REMOTE_DATA = doctest.register_optionflag('REMOTE_DATA')
FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP')

# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True

# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update({
    "mixin",
    "Any",  # aka something that would be annotated with `typing.Any`
    # needed in subclassing numpy  # TODO! revisit
    "Arguments", "Path",
    # TODO! no need to ignore.
    "flag", "bits",
})

# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults # https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94 # and a base set comes from sphinx-astropy. # so here we mostly need to define Astropy-specific x-refs numpydoc_xref_aliases.update({ # python & adjacent "Any": "`~typing.Any`", "file-like": ":term:`python:file-like object`", "file": ":term:`python:file object`", "path-like": ":term:`python:path-like object`", "module": ":term:`python:module`", "buffer-like": ":term:buffer-like", "hashable": ":term:`python:hashable`", # for matplotlib "color": ":term:`color`", # for numpy "ints": ":class:`python:int`", # for astropy "number": ":term:`number`", "Representation": ":class:`~astropy.coordinates.BaseRepresentation`", "writable": ":term:`writable file-like object`", "readable": ":term:`readable file-like object`", "BaseHDU": ":doc:`HDU </io/fits/api/hdus>`" }) # Add from sphinx-astropy 1) glossary aliases 2) physical types. numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases) # -- Project information ------------------------------------------------------ author = u'The Astropy Developers' copyright = f'2011–{datetime.utcnow().year}, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The full version, including alpha/beta/rc tags. release = metadata.version(project) # The short X.Y version. version = '.'.join(release.split('.')[:2]) # Only include dev docs in dev version. dev = 'dev' in release if not dev: exclude_patterns.append('development/*') # noqa: F405 exclude_patterns.append('testhelpers.rst') # noqa: F405 # -- Options for the module index --------------------------------------------- modindex_common_prefix = ['astropy.'] # -- Options for HTML output --------------------------------------------------- # A NOTE ON HTML THEMES # # The global astropy configuration uses a custom theme, # 'bootstrap-astropy', which is installed along with astropy. The # theme has options for controlling the text of the logo in the upper # left corner. This is how you would specify the options in order to # override the theme defaults (The following options *are* the # defaults, so we do not actually need to set them here.) # html_theme_options = { # 'logotext1': 'astro', # white, semi-bold # 'logotext2': 'py', # orange, light # 'logotext3': ':docs' # white, light # } # A different theme can be used, or other parts of this theme can be # modified, by overriding some of the variables set in the global # configuration. The variables set in the global configuration are # listed below, commented out. # Add any paths that contain custom themes here, relative to this directory. # To use a different custom theme, add the directory containing the theme. # html_theme_path = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. To override the custom theme, set this to the # name of a builtin theme or the name of a custom theme in html_theme_path. # html_theme = None # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = '' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
# html_last_updated_fmt = ''

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f'{project} v{release}'

# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'

# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
    'to_be_indexed': ['stable', 'latest'],
    'is_development': dev
}

# -- Options for LaTeX output --------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]

latex_logo = '_static/astropy_logo.pdf'

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]

# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
edit_on_github_branch = 'main'

# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# This is not used. See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open('nitpick-exceptions'):
    if line.strip() == "" or line.startswith("#"):
        continue
    dtype, target = line.split(None, 1)
    target = target.strip()
    nitpick_ignore.append((dtype, target))

# -- Options for the Sphinx gallery -------------------------------------------
try:
    import warnings

    import sphinx_gallery  # noqa: F401
    extensions += ["sphinx_gallery.gen_gallery"]  # noqa: F405

    sphinx_gallery_conf = {
        'backreferences_dir': 'generated/modules',  # path to store the module using example template  # noqa: E501
        'filename_pattern': '^((?!skip_).)*$',  # execute all examples except those that start with "skip_"  # noqa: E501
        'examples_dirs': f'..{os.sep}examples',  # path to the examples scripts
        'gallery_dirs': 'generated/examples',  # path to save gallery generated examples
        'reference_url': {
            'astropy': None,
            'matplotlib': 'https://matplotlib.org/stable/',
            'numpy': 'https://numpy.org/doc/stable/',
        },
        'abort_on_example_error': True
    }

    # Filter out backend-related warnings as described in
    # https://github.com/sphinx-gallery/sphinx-gallery/pull/564
    warnings.filterwarnings("ignore", category=UserWarning,
                            message='Matplotlib is currently using agg, which is a'
                                    ' non-GUI backend, so cannot show the figure.')

except ImportError:
    sphinx_gallery = None

# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
                    'https://maia.usno.navy.mil/',
                    'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer',
                    'https://aa.usno.navy.mil/publications/docs/Circular_179.php',
                    'http://data.astropy.org',
                    'https://doi.org/10.1017/S0251107X00002406',  # internal server error
                    'https://doi.org/10.1017/pasa.2013.31',  # internal server error
                    r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt'] def rstjinja(app, docname, source): """Render pages as a jinja template to hide/show dev docs. """ # Make sure we're outputting HTML if app.builder.format != 'html': return files_to_render = ["index", "install"] if docname in files_to_render: print(f"Jinja rendering {docname}") rendered = app.builder.templates.render_string( source[0], app.config.html_context) source[0] = rendered def resolve_astropy_and_dev_reference(app, env, node, contnode): """ Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases. Documentation links in astropy can be set up as intersphinx links so that affiliate packages do not have to override the docstrings when building the docs. If we are building the development docs it is a local ref targeting the label ``astropy-dev:<label>``, but for stable docs it should be an intersphinx resolution to the development docs. See https://github.com/astropy/astropy/issues/11366 """ # should the node be processed? reftarget = node.get('reftarget') # str or None if str(reftarget).startswith('astropy:'): # This allows Astropy to use intersphinx links to itself and have # them resolve to local links. Downstream packages will see intersphinx. # TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented. process, replace = True, 'astropy:' elif dev and str(reftarget).startswith('astropy-dev:'): process, replace = True, 'astropy-dev:' else: process, replace = False, '' # make link local if process: reftype = node.get('reftype') refdoc = node.get('refdoc', app.env.docname) # convert astropy intersphinx targets to local links. # there are a few types of intersphinx link patters, as described in # https://docs.readthedocs.io/en/stable/guides/intersphinx.html reftarget = reftarget.replace(replace, '') if reftype == "doc": # also need to replace the doc link node.replace_attr("reftarget", reftarget) # Delegate to the ref node's original domain/target (typically :ref:) try: domain = app.env.domains[node['refdomain']] return domain.resolve_xref(app.env, refdoc, app.builder, reftype, reftarget, node, contnode) except Exception: pass # Otherwise return None which should delegate to intersphinx def setup(app): if sphinx_gallery is None: msg = ('The sphinx_gallery extension is not installed, so the ' 'gallery will not be built. You will probably see ' 'additional warnings about undefined references due ' 'to this.') try: app.warn(msg) except AttributeError: # Sphinx 1.6+ from sphinx.util import logging logger = logging.getLogger(__name__) logger.warning(msg) # Generate the page from Jinja template app.connect("source-read", rstjinja) # Set this to higher priority than intersphinx; this way when building # dev docs astropy-dev: targets will go to the local docs instead of the # intersphinx mapping app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import astropy.config as _config
from astropy.utils.compat import optional_deps

from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo

__all__ = ['BST', 'Column', 'ColumnGroups', 'ColumnInfo', 'Conf',
           'JSViewer', 'MaskedColumn', 'NdarrayMixin', 'QTable', 'Row',
           'SCEngine', 'SerializedColumn', 'SortedArray', 'StringTruncateWarning',
           'Table', 'TableAttribute', 'TableColumns', 'TableFormatter',
           'TableGroups', 'TableMergeError', 'TableReplaceWarning', 'conf',
           'connect', 'hstack', 'join', 'registry', 'represent_mixins_as_columns',
           'setdiff', 'unique', 'vstack', 'dstack',
           'join_skycoord', 'join_distance', 'PprintIncludeExclude']


class Conf(_config.ConfigNamespace):  # noqa
    """
    Configuration parameters for `astropy.table`.
    """

    auto_colname = _config.ConfigItem(
        'col{0}',
        'The template that determines the name of a column if it cannot be '
        'determined. Uses new-style (format method) string formatting.',
        aliases=['astropy.table.column.auto_colname'])

    default_notebook_table_class = _config.ConfigItem(
        'table-striped table-bordered table-condensed',
        'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <https://getbootstrap.com/css/#tables> '
        'for a list of useful bootstrap classes.')

    replace_warnings = _config.ConfigItem(
        [],
        'List of conditions for issuing a warning when replacing a table '
        "column using setitem, e.g. t['a'] = value. Allowed options are "
        "'always', 'slice', 'refcount', 'attributes'.",
        'string_list')

    replace_inplace = _config.ConfigItem(
        False,
        'Always use in-place update of a table column when using setitem, '
        "e.g. t['a'] = value. This overrides the default behavior of "
        "replacing the column entirely with the new value when possible. "
        "This configuration option will be deprecated and then removed in "
        "subsequent major releases.")


conf = Conf()  # noqa

from . import connect  # noqa: E402
from .groups import TableGroups, ColumnGroups  # noqa: E402
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
                    NdarrayMixin, TableReplaceWarning, TableAttribute,
                    PprintIncludeExclude)  # noqa: E402
from .operations import (join, setdiff, hstack, dstack, vstack, unique,  # noqa: E402
                         TableMergeError, join_skycoord, join_distance)  # noqa: E402
from .bst import BST  # noqa: E402
from .sorted_array import SortedArray  # noqa: E402
from .soco import SCEngine  # noqa: E402
from .serialize import SerializedColumn, represent_mixins_as_columns  # noqa: E402

# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry  # noqa: E402

with registry.delay_doc_updates(Table):
    # Import routines that connect readers/writers to astropy.table
    from .jsviewer import JSViewer

    import astropy.io.ascii.connect
    import astropy.io.fits.connect
    import astropy.io.misc.connect
    import astropy.io.votable.connect
    import astropy.io.misc.pandas.connect  # noqa: F401

    if optional_deps.HAS_ASDF_ASTROPY:
        import asdf_astropy.io.connect
    else:
        import astropy.io.misc.asdf.connect
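# A small usage sketch (assumes the default configuration): items declared on
# ``Conf`` are read as attributes of the module-level ``conf`` instance.
#
#     >>> from astropy.table import conf
#     >>> conf.auto_colname.format(0)
#     'col0'
#     >>> conf.replace_inplace
#     False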
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import ctypes

import numpy as np

from astropy.modeling.core import Model, custom_model

__all__ = ['discretize_model', 'KernelSizeError']


class DiscretizationError(Exception):
    """
    Raised when discretization of models goes wrong.
    """


class KernelSizeError(Exception):
    """
    Raised when a kernel size is even.
    """


def has_even_axis(array):
    if isinstance(array, (list, tuple)):
        return not len(array) % 2
    else:
        return any(not axes_size % 2 for axes_size in array.shape)


def raise_even_kernel_exception():
    raise KernelSizeError("Kernel size must be odd in all axes.")


def add_kernel_arrays_1D(array_1, array_2):
    """
    Add two 1D kernel arrays of different size.

    The arrays are added with the centers lying upon each other.
    """
    if array_1.size > array_2.size:
        new_array = array_1.copy()
        center = array_1.size // 2
        slice_ = slice(center - array_2.size // 2,
                       center + array_2.size // 2 + 1)
        new_array[slice_] += array_2
        return new_array
    elif array_2.size > array_1.size:
        new_array = array_2.copy()
        center = array_2.size // 2
        slice_ = slice(center - array_1.size // 2,
                       center + array_1.size // 2 + 1)
        new_array[slice_] += array_1
        return new_array
    return array_2 + array_1


def add_kernel_arrays_2D(array_1, array_2):
    """
    Add two 2D kernel arrays of different size.

    The arrays are added with the centers lying upon each other.
    """
    if array_1.size > array_2.size:
        new_array = array_1.copy()
        center = [axes_size // 2 for axes_size in array_1.shape]
        slice_x = slice(center[1] - array_2.shape[1] // 2,
                        center[1] + array_2.shape[1] // 2 + 1)
        slice_y = slice(center[0] - array_2.shape[0] // 2,
                        center[0] + array_2.shape[0] // 2 + 1)
        new_array[slice_y, slice_x] += array_2
        return new_array
    elif array_2.size > array_1.size:
        new_array = array_2.copy()
        center = [axes_size // 2 for axes_size in array_2.shape]
        slice_x = slice(center[1] - array_1.shape[1] // 2,
                        center[1] + array_1.shape[1] // 2 + 1)
        slice_y = slice(center[0] - array_1.shape[0] // 2,
                        center[0] + array_1.shape[0] // 2 + 1)
        new_array[slice_y, slice_x] += array_1
        return new_array
    return array_2 + array_1


def discretize_model(model, x_range, y_range=None, mode='center', factor=10):
    """
    Function to evaluate analytical model functions on a grid.

    So far the function can only deal with pixel coordinates.

    Parameters
    ----------
    model : `~astropy.modeling.Model` or callable
        Analytic model function to be discretized. Callables that are not
        instances of `~astropy.modeling.Model` are passed to
        `~astropy.modeling.custom_model` and then evaluated.
    x_range : tuple
        x range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output
        array size is well defined.
    y_range : tuple, optional
        y range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output
        array size is well defined. Necessary only for 2D models.
    mode : str, optional
        One of the following modes:
            * ``'center'`` (default)
                Discretize model by taking the value
                at the center of the bin.
            * ``'linear_interp'``
                Discretize model by linearly interpolating
                between the values at the corners of the bin.
                For 2D models interpolation is bilinear.
            * ``'oversample'``
                Discretize model by taking the average
                on an oversampled grid.
            * ``'integrate'``
                Discretize model by integrating the model
                over the bin using `scipy.integrate.quad`. Very slow.
    factor : float or int
        Factor of oversampling. Default = 10.

    Returns
    -------
    array : `numpy.ndarray`
        Model value array

    Notes
    -----
    The ``oversample`` mode allows one to conserve the integral on a subpixel
    scale. Here is an example of a normalized Gaussian1D:

    .. plot::
        :include-source:

        import matplotlib.pyplot as plt
        import numpy as np
        from astropy.modeling.models import Gaussian1D
        from astropy.convolution.utils import discretize_model
        gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
        y_center = discretize_model(gauss_1D, (-2, 3), mode='center')
        y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp')
        y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample')
        plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum()))
        plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum()))
        plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum()))
        plt.xlabel('pixels')
        plt.ylabel('value')
        plt.legend()
        plt.show()
    """
    if not callable(model):
        raise TypeError('Model must be callable.')
    if not isinstance(model, Model):
        model = custom_model(model)()
    ndim = model.n_inputs
    if ndim > 2:
        raise ValueError('discretize_model only supports 1-d and 2-d models.')

    if not float(np.diff(x_range)).is_integer():
        raise ValueError("The difference between the upper and lower limit of"
                         " 'x_range' must be a whole number.")

    if y_range:
        if not float(np.diff(y_range)).is_integer():
            raise ValueError("The difference between the upper and lower limit of"
                             " 'y_range' must be a whole number.")

    if ndim == 2 and y_range is None:
        raise ValueError("y range not specified, but model is 2-d")
    if ndim == 1 and y_range is not None:
        raise ValueError("y range specified, but model is only 1-d.")
    if mode == "center":
        if ndim == 1:
            return discretize_center_1D(model, x_range)
        elif ndim == 2:
            return discretize_center_2D(model, x_range, y_range)
    elif mode == "linear_interp":
        if ndim == 1:
            return discretize_linear_1D(model, x_range)
        if ndim == 2:
            return discretize_bilinear_2D(model, x_range, y_range)
    elif mode == "oversample":
        if ndim == 1:
            return discretize_oversample_1D(model, x_range, factor)
        if ndim == 2:
            return discretize_oversample_2D(model, x_range, y_range, factor)
    elif mode == "integrate":
        if ndim == 1:
            return discretize_integrate_1D(model, x_range)
        if ndim == 2:
            return discretize_integrate_2D(model, x_range, y_range)
    else:
        raise DiscretizationError('Invalid mode.')


def discretize_center_1D(model, x_range):
    """
    Discretize model by taking the value at the center of the bin.
    """
    x = np.arange(*x_range)
    return model(x)


def discretize_center_2D(model, x_range, y_range):
    """
    Discretize model by taking the value at the center of the pixel.
    """
    x = np.arange(*x_range)
    y = np.arange(*y_range)
    x, y = np.meshgrid(x, y)
    return model(x, y)


def discretize_linear_1D(model, x_range):
    """
    Discretize model by performing a linear interpolation.
    """
    # Evaluate model 0.5 pixel outside the boundaries
    x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    values_intermediate_grid = model(x)
    return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])


def discretize_bilinear_2D(model, x_range, y_range):
    """
    Discretize model by performing a bilinear interpolation.
""" # Evaluate model 0.5 pixel outside the boundaries x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) x, y = np.meshgrid(x, y) values_intermediate_grid = model(x, y) # Mean in y direction values = 0.5 * (values_intermediate_grid[1:, :] + values_intermediate_grid[:-1, :]) # Mean in x direction values = 0.5 * (values[:, 1:] + values[:, :-1]) return values def discretize_oversample_1D(model, x_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor)) values = model(x) # Reshape and compute mean values = np.reshape(values, (x.size // factor, factor)) return values.mean(axis=1) def discretize_oversample_2D(model, x_range, y_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor)) y = np.linspace(y_range[0] - 0.5 * (1 - 1 / factor), y_range[1] - 0.5 * (1 + 1 / factor), num=int((y_range[1] - y_range[0]) * factor)) x_grid, y_grid = np.meshgrid(x, y) values = model(x_grid, y_grid) # Reshape and compute mean shape = (y.size // factor, factor, x.size // factor, factor) values = np.reshape(values, shape) return values.mean(axis=3).mean(axis=1) def discretize_integrate_1D(model, x_range): """ Discretize model by integrating numerically the model over the bin. """ from scipy.integrate import quad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) values = np.array([]) # Integrate over all bins for i in range(x.size - 1): values = np.append(values, quad(model, x[i], x[i + 1])[0]) return values def discretize_integrate_2D(model, x_range, y_range): """ Discretize model by integrating the model over the pixel. """ from scipy.integrate import dblquad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) values = np.empty((y.size - 1, x.size - 1)) # Integrate over all pixels for i in range(x.size - 1): for j in range(y.size - 1): values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1], lambda x: y[j], lambda x: y[j + 1])[0] return values
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to
perform fitting. Fitters are implemented as callable classes. In addition to
the data to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.

Optimization algorithms, called "optimizers", are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy-to-extend
framework and allow users to easily create new fitters by combining
statistics with optimizers.

There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq`, which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name

import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points

import numpy as np

from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning

from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import (SplineExactKnotsFitter, SplineInterpolateFitter,
                     SplineSmoothingFitter, SplineSplrepFitter)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain

__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
           'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter',
           "ModelLinearityError", "ModelsError"]

# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]

# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]


class NonFiniteValueError(RuntimeError):
    """
    Error raised when attempting to fit with a non-finite value.
    """


class Covariance():
    """Class for covariance matrix calculated by fitter. """

    def __init__(self, cov_matrix, param_names):
        self.cov_matrix = cov_matrix
        self.param_names = param_names

    def pprint(self, max_lines, round_val):
        # Print and label lower triangle of covariance matrix
        # Print rows for params up to `max_lines`, round floats to 'round_val'
        longest_name = max([len(x) for x in self.param_names])
        ret_str = 'parameter variances / covariances \n'
        fstring = f'{"": <{longest_name}}| {{0}}\n'
        for i, row in enumerate(self.cov_matrix):
            if i <= max_lines-1:
                param = self.param_names[i]
                ret_str += fstring.replace(' '*len(param), param, 1).\
                    format(repr(np.round(row[:i+1], round_val))[7:-2])
            else:
                ret_str += '...'
return(ret_str.rstrip()) def __repr__(self): return(self.pprint(max_lines=10, round_val=3)) def __getitem__(self, params): # index covariance matrix by parameter names or indices if len(params) != 2: raise ValueError('Covariance must be indexed by two values.') if all(isinstance(item, str) for item in params): i1, i2 = self.param_names.index(params[0]), self.param_names.index(params[1]) elif all(isinstance(item, int) for item in params): i1, i2 = params else: raise TypeError('Covariance can be indexed by two parameter names or integer indices.') return(self.cov_matrix[i1][i2]) class StandardDeviations(): """ Class for fitting uncertainties.""" def __init__(self, cov_matrix, param_names): self.param_names = param_names self.stds = self._calc_stds(cov_matrix) def _calc_stds(self, cov_matrix): # sometimes scipy lstsq returns a non-sensical negative vals in the # diagonals of the cov_x it computes. stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)] return stds def pprint(self, max_lines, round_val): longest_name = max([len(x) for x in self.param_names]) ret_str = 'standard deviations\n' fstring = '{0}{1}| {2}\n' for i, std in enumerate(self.stds): if i <= max_lines-1: param = self.param_names[i] ret_str += fstring.format(param, ' ' * (longest_name - len(param)), str(np.round(std, round_val))) else: ret_str += '...' return(ret_str.rstrip()) def __repr__(self): return(self.pprint(max_lines=10, round_val=3)) def __getitem__(self, param): if isinstance(param, str): i = self.param_names.index(param) elif isinstance(param, int): i = param else: raise TypeError('Standard deviation can be indexed by parameter name or integer.') return(self.stds[i]) class ModelsError(Exception): """Base class for model exceptions""" class ModelLinearityError(ModelsError): """ Raised when a non-linear model is passed to a linear fitter.""" class UnsupportedConstraintError(ModelsError, ValueError): """ Raised when a fitter does not support a type of constraint. """ class _FitterMeta(abc.ABCMeta): """ Currently just provides a registry for all Fitter classes. """ registry = set() def __new__(mcls, name, bases, members): cls = super().__new__(mcls, name, bases, members) if not inspect.isabstract(cls) and not name.startswith('_'): mcls.registry.add(cls) return cls def fitter_unit_support(func): """ This is a decorator that can be used to add support for dealing with quantities to any __call__ method on a fitter which may not support quantities itself. This is done by temporarily removing units from all parameters then adding them back once the fitting has completed. """ @wraps(func) def wrapper(self, model, x, y, z=None, **kwargs): equivalencies = kwargs.pop('equivalencies', None) data_has_units = (isinstance(x, Quantity) or isinstance(y, Quantity) or isinstance(z, Quantity)) model_has_units = model._has_units if data_has_units or model_has_units: if model._supports_unit_fitting: # We now combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict( model.inputs, equivalencies, model.input_units_equivalencies) # If input_units is defined, we transform the input data into those # expected by the model. 
                # We hard-code the input names 'x' and 'y' here since
                # FittableModel instances have input names ('x',) or ('x', 'y').
                if model.input_units is not None:
                    if isinstance(x, Quantity):
                        x = x.to(model.input_units[model.inputs[0]],
                                 equivalencies=input_units_equivalencies[model.inputs[0]])
                    if isinstance(y, Quantity) and z is not None:
                        y = y.to(model.input_units[model.inputs[1]],
                                 equivalencies=input_units_equivalencies[model.inputs[1]])

                # Create a dictionary mapping the real model inputs and outputs
                # names to the data. This remapping of names must be done here,
                # after the input data is converted to the correct units.
                rename_data = {model.inputs[0]: x}
                if z is not None:
                    rename_data[model.outputs[0]] = z
                    rename_data[model.inputs[1]] = y
                else:
                    rename_data[model.outputs[0]] = y
                    rename_data['z'] = None

                # We now strip away the units from the parameters, taking care
                # to first convert any parameters to the units that correspond
                # to the input units (to make sure that initial guesses on the
                # parameters are in the right unit system).
                model = model.without_units_for_data(**rename_data)
                if isinstance(model, tuple):
                    rename_data['_left_kwargs'] = model[1]
                    rename_data['_right_kwargs'] = model[2]
                    model = model[0]

                # We strip away the units from the input itself
                add_back_units = False

                if isinstance(x, Quantity):
                    add_back_units = True
                    xdata = x.value
                else:
                    xdata = np.asarray(x)

                if isinstance(y, Quantity):
                    add_back_units = True
                    ydata = y.value
                else:
                    ydata = np.asarray(y)

                if z is not None:
                    if isinstance(z, Quantity):
                        add_back_units = True
                        zdata = z.value
                    else:
                        zdata = np.asarray(z)

                # We run the fitting
                if z is None:
                    model_new = func(self, model, xdata, ydata, **kwargs)
                else:
                    model_new = func(self, model, xdata, ydata, zdata, **kwargs)

                # And finally we add back units to the parameters
                if add_back_units:
                    model_new = model_new.with_units_from_data(**rename_data)
                return model_new

            else:
                raise NotImplementedError("This model does not support being "
                                          "fit to data with units.")

        else:
            return func(self, model, x, y, z=z, **kwargs)

    return wrapper


class Fitter(metaclass=_FitterMeta):
    """
    Base class for all fitters.

    Parameters
    ----------
    optimizer : callable
        A callable implementing an optimization algorithm
    statistic : callable
        Statistic function
    """

    supported_constraints = []

    def __init__(self, optimizer, statistic):
        if optimizer is None:
            raise ValueError("Expected an optimizer.")
        if statistic is None:
            raise ValueError("Expected a statistic function.")
        if inspect.isclass(optimizer):
            # a callable class
            self._opt_method = optimizer()
        elif inspect.isfunction(optimizer):
            self._opt_method = optimizer
        else:
            raise ValueError("Expected optimizer to be a callable class or a function.")
        if inspect.isclass(statistic):
            self._stat_method = statistic()
        else:
            self._stat_method = statistic

    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [other_args], [input coordinates]]
            other_args may include weights or any other quantities specific
            for a statistic

        Notes
        -----
        The list of arguments (args) is set in the `__call__` method.
        Fitters may override this method, e.g. when statistic functions
        require other arguments.
        """
        model = args[0]
        meas = args[-1]
        fitter_to_model_params(model, fps)
        res = self._stat_method(meas, model, *args[1:-1])
        return res

    @staticmethod
    def _add_fitting_uncertainties(*args):
        """
        When available, calculate and set the parameter covariance matrix
        (model.cov_matrix) and standard deviations (model.stds).
""" return None @abc.abstractmethod def __call__(self): """ This method performs the actual fitting and modifies the parameter list of a model. Fitter subclasses should implement this method. """ raise NotImplementedError("Subclasses should implement this method.") # TODO: I have ongoing branch elsewhere that's refactoring this module so that # all the fitter classes in here are Fitter subclasses. In the meantime we # need to specify that _FitterMeta is its metaclass. class LinearLSQFitter(metaclass=_FitterMeta): """ A class performing a linear least square fitting. Uses `numpy.linalg.lstsq` to do the fitting. Given a model and data, fits the model to the data and changes the model's parameters. Keeps a dictionary of auxiliary fitting information. Notes ----- Note that currently LinearLSQFitter does not support compound models. """ supported_constraints = ['fixed'] supports_masked_input = True def __init__(self, calc_uncertainties=False): self.fit_info = {'residuals': None, 'rank': None, 'singular_values': None, 'params': None } self._calc_uncertainties=calc_uncertainties @staticmethod def _is_invertible(m): """Check if inverse of matrix can be obtained.""" if m.shape[0] != m.shape[1]: return False if np.linalg.matrix_rank(m) < m.shape[0]: return False return True def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None): """ Calculate and parameter covariance matrix and standard deviations and set `cov_matrix` and `stds` attributes. """ x_dot_x_prime = np.dot(a.T, a) masked = False or hasattr(y, 'mask') # check if invertible. if not, can't calc covariance. if not self._is_invertible(x_dot_x_prime): return(model) inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime) if z is None: # 1D models if len(model) == 1: # single model mask = None if masked: mask = y.mask xx = np.ma.array(x, mask=mask) RSS = [(1/(xx.count()-n_coeff)) * resids] if len(model) > 1: # model sets RSS = [] # collect sum residuals squared for each model in set for j in range(len(model)): mask = None if masked: mask = y.mask[..., j].flatten() xx = np.ma.array(x, mask=mask) eval_y = model(xx, model_set_axis=False) eval_y = np.rollaxis(eval_y, model.model_set_axis)[j] RSS.append((1/(xx.count()-n_coeff)) * np.sum((y[..., j] - eval_y)**2)) else: # 2D model if len(model) == 1: mask = None if masked: warnings.warn('Calculation of fitting uncertainties ' 'for 2D models with masked values not ' 'currently supported.\n', AstropyUserWarning) return xx, yy = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask) # len(xx) instead of xx.count. this will break if values are masked? RSS = [(1/(len(xx)-n_coeff)) * resids] else: RSS = [] for j in range(len(model)): eval_z = model(x, y, model_set_axis=False) mask = None # need to figure out how to deal w/ masking here. if model.model_set_axis == 1: # model_set_axis passed when evaluating only refers to input shapes # so output must be reshaped for model_set_axis=1. 
eval_z = np.rollaxis(eval_z, 1) eval_z = eval_z[j] RSS.append([(1/(len(x)-n_coeff)) * np.sum((z[j] - eval_z)**2)]) covs = [inv_x_dot_x_prime * r for r in RSS] free_param_names = [x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False)] if len(covs) == 1: model.cov_matrix = Covariance(covs[0], model.param_names) model.stds = StandardDeviations(covs[0], free_param_names) else: model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs] model.stds = [StandardDeviations(cov, free_param_names) for cov in covs] @staticmethod def _deriv_with_constraints(model, param_indices, x=None, y=None): if y is None: d = np.array(model.fit_deriv(x, *model.parameters)) else: d = np.array(model.fit_deriv(x, y, *model.parameters)) if model.col_fit_deriv: return d[param_indices] else: return d[..., param_indices] def _map_domain_window(self, model, x, y=None): """ Maps domain into window for a polynomial model which has these attributes. """ if y is None: if hasattr(model, 'domain') and model.domain is None: model.domain = [x.min(), x.max()] if hasattr(model, 'window') and model.window is None: model.window = [-1, 1] return poly_map_domain(x, model.domain, model.window) else: if hasattr(model, 'x_domain') and model.x_domain is None: model.x_domain = [x.min(), x.max()] if hasattr(model, 'y_domain') and model.y_domain is None: model.y_domain = [y.min(), y.max()] if hasattr(model, 'x_window') and model.x_window is None: model.x_window = [-1., 1.] if hasattr(model, 'y_window') and model.y_window is None: model.y_window = [-1., 1.] xnew = poly_map_domain(x, model.x_domain, model.x_window) ynew = poly_map_domain(y, model.y_domain, model.y_window) return xnew, ynew @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, rcond=None): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array Input coordinates y : array-like Input coordinates z : array-like, optional Input coordinates. If the dependent (``y`` or ``z``) coordinate values are provided as a `numpy.ma.MaskedArray`, any masked points are ignored when fitting. Note that model set fitting is significantly slower when there are masked points (not just an empty mask), as the matrix equation has to be solved for each model separately when their coordinate grids differ. weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. rcond : float, optional Cut-off ratio for small singular values of ``a``. Singular values are set to zero if they are smaller than ``rcond`` times the largest singular value of ``a``. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. 
Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ if not model.fittable: raise ValueError("Model must be a subclass of FittableModel") if not model.linear: raise ModelLinearityError('Model is not linear in parameters, ' 'linear fit methods should not be used.') if hasattr(model, "submodel_names"): raise ValueError("Model must be simple, not compound") _validate_constraints(self.supported_constraints, model) model_copy = model.copy() model_copy.sync_constraints = False _, fitparam_indices = model_to_fit_params(model_copy) if model_copy.n_inputs == 2 and z is None: raise ValueError("Expected x, y and z for a 2 dimensional model.") farg = _convert_input(x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis) has_fixed = any(model_copy.fixed.values()) # This is also done by _convert_inputs, but we need it here to allow # checking the array dimensionality before that gets called: if weights is not None: weights = np.asarray(weights, dtype=float) if has_fixed: # The list of fixed params is the complement of those being fitted: fixparam_indices = [idx for idx in range(len(model_copy.param_names)) if idx not in fitparam_indices] # Construct matrix of user-fixed parameters that can be dotted with # the corresponding fit_deriv() terms, to evaluate corrections to # the dependent variable in order to fit only the remaining terms: fixparams = np.asarray([getattr(model_copy, model_copy.param_names[idx]).value for idx in fixparam_indices]) if len(farg) == 2: x, y = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, weights = _convert_input( x, weights, n_models=len(model_copy) if weights.ndim == y.ndim else 1, model_set_axis=model_copy.model_set_axis ) # map domain into window if hasattr(model_copy, 'domain'): x = self._map_domain_window(model_copy, x) if has_fixed: lhs = np.asarray(self._deriv_with_constraints(model_copy, fitparam_indices, x=x)) fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x) else: lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x) rhs = y else: x, y, z = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, _, weights = _convert_input( x, y, weights, n_models=len(model_copy) if weights.ndim == z.ndim else 1, model_set_axis=model_copy.model_set_axis ) # map domain into window if hasattr(model_copy, 'x_domain'): x, y = self._map_domain_window(model_copy, x, y) if has_fixed: lhs = np.asarray(self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)) fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x, y=y) else: lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y) if len(model_copy) > 1: # Just to be explicit (rather than baking in False == 0): model_axis = model_copy.model_set_axis or 0 if z.ndim > 2: # For higher-dimensional z, flatten all the axes except the # dimension along which models are stacked and transpose so # the model axis is *last* (I think this resolves Erik's # pending generalization from 80a6f25a): rhs = np.rollaxis(z, model_axis, z.ndim) rhs = rhs.reshape(-1, rhs.shape[-1]) else: # This "else" seems to 
handle the corner case where the # user has already flattened x/y before attempting a 2D fit # but z has a second axis for the model set. NB. This is # ~5-10x faster than using rollaxis. rhs = z.T if model_axis == 0 else z if weights is not None: # Same for weights if weights.ndim > 2: # Separate 2D weights for each model: weights = np.rollaxis(weights, model_axis, weights.ndim) weights = weights.reshape(-1, weights.shape[-1]) elif weights.ndim == z.ndim: # Separate, flattened weights for each model: weights = weights.T if model_axis == 0 else weights else: # Common weights for all the models: weights = weights.flatten() else: rhs = z.flatten() if weights is not None: weights = weights.flatten() # If the derivative is defined along rows (as with non-linear models) if model_copy.col_fit_deriv: lhs = np.asarray(lhs).T # Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs # when constructing their Vandermonde matrix, which can lead to obscure # failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices, # so just raise a slightly more informative error when this happens: if np.asanyarray(lhs).ndim > 2: raise ValueError('{} gives unsupported >2D derivative matrix for ' 'this x/y'.format(type(model_copy).__name__)) # Subtract any terms fixed by the user from (a copy of) the RHS, in # order to fit the remaining terms correctly: if has_fixed: if model_copy.col_fit_deriv: fixderivs = np.asarray(fixderivs).T # as for lhs above rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms # Subtract any terms implicit in the model from the RHS, which, like # user-fixed terms, affect the dependent variable but are not fitted: if sum_of_implicit_terms is not None: # If we have a model set, the extra axis must be added to # sum_of_implicit_terms as its innermost dimension, to match the # dimensionality of rhs after _convert_input "rolls" it as needed # by np.linalg.lstsq. The vector then gets broadcast to the right # number of sets (columns). This assumes all the models share the # same input coordinates, as is currently the case. if len(model_copy) > 1: sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis] rhs = rhs - sum_of_implicit_terms if weights is not None: if rhs.ndim == 2: if weights.shape == rhs.shape: # separate weights for multiple models case: broadcast # lhs to have more dimension (for each model) lhs = lhs[..., np.newaxis] * weights[:, np.newaxis] rhs = rhs * weights else: lhs *= weights[:, np.newaxis] # Don't modify in-place in case rhs was the original # dependent variable array rhs = rhs * weights[:, np.newaxis] else: lhs *= weights[:, np.newaxis] rhs = rhs * weights scl = (lhs * lhs).sum(0) lhs /= scl masked = np.any(np.ma.getmask(rhs)) if weights is not None and not masked and np.any(np.isnan(lhs)): raise ValueError('Found NaNs in the coefficient matrix, which ' 'should not happen and would crash the lapack ' 'routine. Maybe check that weights are not null.') a = None # need for calculating covarience if ((masked and len(model_copy) > 1) or (weights is not None and weights.ndim > 1)): # Separate masks or weights for multiple models case: Numpy's # lstsq supports multiple dimensions only for rhs, so we need to # loop manually on the models. This may be fixed in the future # with https://github.com/numpy/numpy/pull/15777. # Initialize empty array of coefficients and populate it one model # at a time. 
The shape matches the number of coefficients from the # Vandermonde matrix and the number of models from the RHS: lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype) # Arrange the lhs as a stack of 2D matrices that we can iterate # over to get the correctly-orientated lhs for each model: if lhs.ndim > 2: lhs_stack = np.rollaxis(lhs, -1, 0) else: lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape) # Loop over the models and solve for each one. By this point, the # model set axis is the second of two. Transpose rather than using, # say, np.moveaxis(array, -1, 0), since it's slightly faster and # lstsq can't handle >2D arrays anyway. This could perhaps be # optimized by collecting together models with identical masks # (eg. those with no rejected points) into one operation, though it # will still be relatively slow when calling lstsq repeatedly. for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T): # Cull masked points on both sides of the matrix equation: good = ~model_rhs.mask if masked else slice(None) model_lhs = model_lhs[good] model_rhs = model_rhs[good][..., np.newaxis] a = model_lhs # Solve for this model: t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs, model_rhs, rcond) model_lacoef[:] = t_coef.T else: # If we're fitting one or more models over a common set of points, # we only have to solve a single matrix equation, which is an order # of magnitude faster than calling lstsq() once per model below: good = ~rhs.mask if masked else slice(None) # latter is a no-op a = lhs[good] # Solve for one or more models: lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond) self.fit_info['residuals'] = resids self.fit_info['rank'] = rank self.fit_info['singular_values'] = sval lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl self.fit_info['params'] = lacoef fitter_to_model_params(model_copy, lacoef.flatten()) # TODO: Only Polynomial models currently have an _order attribute; # maybe change this to read isinstance(model, PolynomialBase) if hasattr(model_copy, '_order') and len(model_copy) == 1 \ and not has_fixed and rank != model_copy._order: warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning) # calculate and set covariance matrix and standard devs. on model if self._calc_uncertainties: if len(y) > len(lacoef): self._add_fitting_uncertainties(model_copy, a*scl, len(lacoef), x, y, z, resids) model_copy.sync_constraints = True return model_copy class FittingWithOutlierRemoval: """ This class combines an outlier removal technique with a fitting procedure. Basically, given a maximum number of iterations ``niter``, outliers are removed and fitting is performed for each iteration, until no new outliers are found or ``niter`` is reached. Parameters ---------- fitter : `Fitter` An instance of any Astropy fitter, i.e., LinearLSQFitter, LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For model set fitting, this must understand masked input data (as indicated by the fitter class attribute ``supports_masked_input``). outlier_func : callable A function for outlier removal. If this accepts an ``axis`` parameter like the `numpy` functions, the appropriate value will be supplied automatically when fitting model sets (unless overridden in ``outlier_kwargs``), to find outliers for each model separately; otherwise, the same filtering must be performed in a loop over models, which is almost an order of magnitude slower. niter : int, optional Maximum number of iterations. 
outlier_kwargs : dict, optional Keyword arguments for outlier_func. Attributes ---------- fit_info : dict The ``fit_info`` (if any) from the last iteration of the wrapped ``fitter`` during the most recent fit. An entry is also added with the keyword ``niter`` that records the actual number of fitting iterations performed (as opposed to the user-specified maximum). """ def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs): self.fitter = fitter self.outlier_func = outlier_func self.niter = niter self.outlier_kwargs = outlier_kwargs self.fit_info = {'niter': None} def __str__(self): return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" + ("\nOutlier func. args.: {3}"))\ .format(self.fitter.__class__.__name__, self.outlier_func.__name__, self.niter, self.outlier_kwargs) def __repr__(self): return ("{0}(fitter: {1}, outlier_func: {2}," + " niter: {3}, outlier_kwargs: {4})")\ .format(self.__class__.__name__, self.fitter.__class__.__name__, self.outlier_func.__name__, self.niter, self.outlier_kwargs) def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Parameters ---------- model : `~astropy.modeling.FittableModel` An analytic model which will be fit to the provided data. This also contains the initial guess for an optimization algorithm. x : array-like Input coordinates. y : array-like Data measurements (1D case) or input coordinates (2D case). z : array-like, optional Data measurements (2D case). weights : array-like, optional Weights to be passed to the fitter. kwargs : dict, optional Keyword arguments to be passed to the fitter. Returns ------- fitted_model : `~astropy.modeling.FittableModel` Fitted model after outlier removal. mask : `numpy.ndarray` Boolean mask array, identifying which points were used in the final fitting iteration (False) and which were found to be outliers or were masked in the input (True). """ # For single models, the data get filtered here at each iteration and # then passed to the fitter, which is the historical behavior and # works even for fitters that don't understand masked arrays. For model # sets, the fitter must be able to filter masked data internally, # because fitters require a single set of x/y coordinates whereas the # eliminated points can vary between models. To avoid this limitation, # we could fall back to looping over individual model fits, but it # would likely be fiddly and involve even more overhead (and the # non-linear fitters don't work with model sets anyway, as of writing). 
if len(model) == 1: model_set_axis = None else: if not hasattr(self.fitter, 'supports_masked_input') or \ self.fitter.supports_masked_input is not True: raise ValueError("{} cannot fit model sets with masked " "values".format(type(self.fitter).__name__)) # Fitters use their input model's model_set_axis to determine how # their input data are stacked: model_set_axis = model.model_set_axis # Construct input coordinate tuples for fitters & models that are # appropriate for the dimensionality being fitted: if z is None: coords = (x, ) data = y else: coords = x, y data = z # For model sets, construct a numpy-standard "axis" tuple for the # outlier function, to treat each model separately (if supported): if model_set_axis is not None: if model_set_axis < 0: model_set_axis += data.ndim if 'axis' not in self.outlier_kwargs: # allow user override # This also works for False (like model instantiation): self.outlier_kwargs['axis'] = tuple( n for n in range(data.ndim) if n != model_set_axis ) loop = False # Starting fit, prior to any iteration and masking: fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs) filtered_data = np.ma.masked_array(data) if filtered_data.mask is np.ma.nomask: filtered_data.mask = False filtered_weights = weights last_n_masked = filtered_data.mask.sum() n = 0 # (allow recording no. of iterations when 0) # Perform the iterative fitting: for n in range(1, self.niter + 1): # (Re-)evaluate the last model: model_vals = fitted_model(*coords, model_set_axis=False) # Determine the outliers: if not loop: # Pass axis parameter if outlier_func accepts it, otherwise # prepare for looping over models: try: filtered_data = self.outlier_func( filtered_data - model_vals, **self.outlier_kwargs ) # If this happens to catch an error with a parameter other # than axis, the next attempt will fail accordingly: except TypeError: if model_set_axis is None: raise else: self.outlier_kwargs.pop('axis', None) loop = True # Construct MaskedArray to hold filtered values: filtered_data = np.ma.masked_array( filtered_data, dtype=np.result_type(filtered_data, model_vals), copy=True ) # Make sure the mask is an array, not just nomask: if filtered_data.mask is np.ma.nomask: filtered_data.mask = False # Get views transposed appropriately for iteration # over the set (handling data & mask separately due to # NumPy issue #8506): data_T = np.rollaxis(filtered_data, model_set_axis, 0) mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0) if loop: model_vals_T = np.rollaxis(model_vals, model_set_axis, 0) for row_data, row_mask, row_mod_vals in zip(data_T, mask_T, model_vals_T): masked_residuals = self.outlier_func( row_data - row_mod_vals, **self.outlier_kwargs ) row_data.data[:] = masked_residuals.data row_mask[:] = masked_residuals.mask # Issue speed warning after the fact, so it only shows up when # the TypeError is genuinely due to the axis argument. 
warnings.warn('outlier_func did not accept axis argument; ' 'reverted to slow loop over models.', AstropyUserWarning) # Recombine newly-masked residuals with model to get masked values: filtered_data += model_vals # Re-fit the data after filtering, passing masked/unmasked values # for single models / sets, respectively: if model_set_axis is None: good = ~filtered_data.mask if weights is not None: filtered_weights = weights[good] fitted_model = self.fitter(fitted_model, *(c[good] for c in coords), filtered_data.data[good], weights=filtered_weights, **kwargs) else: fitted_model = self.fitter(fitted_model, *coords, filtered_data, weights=filtered_weights, **kwargs) # Stop iteration if the masked points are no longer changing (with # cumulative rejection we only need to compare how many there are): this_n_masked = filtered_data.mask.sum() # (minimal overhead) if this_n_masked == last_n_masked: break last_n_masked = this_n_masked self.fit_info = {'niter': n} self.fit_info.update(getattr(self.fitter, 'fit_info', {})) return fitted_model, filtered_data.mask class LevMarLSQFitter(metaclass=_FitterMeta): """ Levenberg-Marquardt algorithm and least squares statistic. Attributes ---------- fit_info : dict The `scipy.optimize.leastsq` result for the most recent fit (see notes). Notes ----- The ``fit_info`` dictionary contains the values returned by `scipy.optimize.leastsq` for the most recent fit, including the values from the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq` documentation for details on the meaning of these values. Note that the ``x`` return value is *not* included (as it is instead the parameter values of the returned model). Additionally, one additional element of ``fit_info`` is computed whenever a model is fit, with the key 'param_cov'. The corresponding value is the covariance matrix of the parameters as a 2D numpy array. The order of the matrix elements matches the order of the parameters in the fitted model (i.e., the same order as ``model.param_names``). """ supported_constraints = ['fixed', 'tied', 'bounds'] """ The constraint types supported by this fitter type. """ def __init__(self, calc_uncertainties=False): self.fit_info = {'nfev': None, 'fvec': None, 'fjac': None, 'ipvt': None, 'qtf': None, 'message': None, 'ierr': None, 'param_jac': None, 'param_cov': None} self._calc_uncertainties=calc_uncertainties super().__init__() def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [weights], [input coordinates]] """ model = args[0] weights = args[1] fitter_to_model_params(model, fps) meas = args[-1] if weights is None: value = np.ravel(model(*args[2: -1]) - meas) else: value = np.ravel(weights * (model(*args[2: -1]) - meas)) if not np.all(np.isfinite(value)): raise NonFiniteValueError("Objective function has encountered a non-finite value, this will cause the fit to fail!") return value @staticmethod def _add_fitting_uncertainties(model, cov_matrix): """ Set ``cov_matrix`` and ``stds`` attributes on model with parameter covariance matrix returned by ``optimize.leastsq``. 
""" free_param_names = [x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False)] model.cov_matrix = Covariance(cov_matrix, free_param_names) model.stds = StandardDeviations(cov_matrix, free_param_names) @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC, epsilon=DEFAULT_EPS, estimate_jacobian=False): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. maxiter : int maximum number of iterations acc : float Relative error desired in the approximate solution epsilon : float A suitable step length for the forward-difference approximation of the Jacobian (if model.fjac=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. estimate_jacobian : bool If False (default) and if the model has a fit_deriv method, it will be used. Otherwise the Jacobian will be estimated. If True, the Jacobian will be estimated in any case. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ from scipy import optimize model_copy = _validate_model(model, self.supported_constraints) model_copy.sync_constraints = False farg = (model_copy, weights, ) + _convert_input(x, y, z) if model_copy.fit_deriv is None or estimate_jacobian: dfunc = None else: dfunc = self._wrap_deriv init_values, _ = model_to_fit_params(model_copy) fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq( self.objective_function, init_values, args=farg, Dfun=dfunc, col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon, xtol=acc, full_output=True) fitter_to_model_params(model_copy, fitparams) self.fit_info.update(dinfo) self.fit_info['cov_x'] = cov_x self.fit_info['message'] = mess self.fit_info['ierr'] = ierr if ierr not in [1, 2, 3, 4]: warnings.warn("The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning) # now try to compute the true covariance matrix if (len(y) > len(init_values)) and cov_x is not None: sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2) dof = len(y) - len(init_values) self.fit_info['param_cov'] = cov_x * sum_sqrs / dof else: self.fit_info['param_cov'] = None if self._calc_uncertainties is True: if self.fit_info['param_cov'] is not None: self._add_fitting_uncertainties(model_copy, self.fit_info['param_cov']) model_copy.sync_constraints = True return model_copy @staticmethod def _wrap_deriv(params, model, weights, x, y, z=None): """ Wraps the method calculating the Jacobian of the function to account for model constraints. `scipy.optimize.leastsq` expects the function derivative to have the above signature (parlist, (argtuple)). In order to accommodate model constraints, instead of using p directly, we set the parameter list in this function. 
""" if weights is None: weights = 1.0 if any(model.fixed.values()) or any(model.tied.values()): # update the parameters with the current values from the fitter fitter_to_model_params(model, params) if z is None: full = np.array(model.fit_deriv(x, *model.parameters)) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full else: full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full pars = [getattr(model, name) for name in model.param_names] fixed = [par.fixed for par in pars] tied = [par.tied for par in pars] tied = list(np.where([par.tied is not False for par in pars], True, tied)) fix_and_tie = np.logical_or(fixed, tied) ind = np.logical_not(fix_and_tie) if not model.col_fit_deriv: residues = np.asarray(full_deriv[np.nonzero(ind)]).T else: residues = full_deriv[np.nonzero(ind)] return [np.ravel(_) for _ in residues] else: if z is None: try: return np.array([np.ravel(_) for _ in np.array(weights) * np.array(model.fit_deriv(x, *params))]) except ValueError: return np.array([np.ravel(_) for _ in np.array(weights) * np.moveaxis( np.array(model.fit_deriv(x, *params)), -1, 0)]).transpose() else: if not model.col_fit_deriv: return [np.ravel(_) for _ in (np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T] return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))] class SLSQPLSQFitter(Fitter): """ Sequential Least Squares Programming (SLSQP) optimization algorithm and least squares statistic. Raises ------ ModelLinearityError A linear model is passed to a nonlinear fitter Notes ----- See also the `~astropy.modeling.optimizers.SLSQP` optimizer. """ supported_constraints = SLSQP.supported_constraints def __init__(self): super().__init__(optimizer=SLSQP, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic verblevel : int 0-silent 1-print summary upon completion, 2-print summary after each iteration maxiter : int maximum number of iterations epsilon : float the step size for finite-difference derivative estimates acc : float Requested accuracy equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = (model_copy, weights, ) + farg init_values, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class SimplexLSQFitter(Fitter): """ Simplex algorithm and least squares statistic. 
Raises ------ `ModelLinearityError` A linear model is passed to a nonlinear fitter """ supported_constraints = Simplex.supported_constraints def __init__(self): super().__init__(optimizer=Simplex, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic maxiter : int maximum number of iterations acc : float Relative error in approximate solution equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = (model_copy, weights, ) + farg init_values, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class JointFitter(metaclass=_FitterMeta): """ Fit models which share a parameter. For example, fit two gaussians to two data sets but keep the FWHM the same. Parameters ---------- models : list a list of model instances jointparameters : list a list of joint parameters initvals : list a list of initial values """ def __init__(self, models, jointparameters, initvals): self.models = list(models) self.initvals = list(initvals) self.jointparams = jointparameters self._verify_input() self.fitparams = self.model_to_fit_params() # a list of model.n_inputs self.modeldims = [m.n_inputs for m in self.models] # sum all model dimensions self.ndim = np.sum(self.modeldims) def model_to_fit_params(self): fparams = [] fparams.extend(self.initvals) for model in self.models: params = model.parameters.tolist() joint_params = self.jointparams[model] param_metrics = model._param_metrics for param_name in joint_params: slice_ = param_metrics[param_name]['slice'] del params[slice_] fparams.extend(params) return fparams def objective_function(self, fps, *args): """ Function to minimize. 
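
        The residuals of each model with respect to its own data set are
        evaluated with the joint parameter values substituted in, then all
        residuals are concatenated into a single flat array for
        `scipy.optimize.leastsq`.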
        Parameters
        ----------
        fps : list
            the fitted parameters - result of one iteration of the
            fitting algorithm
        args : tuple
            tuple of measured and input coordinates
            args is always passed as a tuple from optimize.leastsq
        """
        lstsqargs = list(args)
        fitted = []
        fitparams = list(fps)
        numjp = len(self.initvals)
        # make a separate list of the joint fitted parameters
        jointfitparams = fitparams[:numjp]
        del fitparams[:numjp]

        for model in self.models:
            joint_params = self.jointparams[model]
            margs = lstsqargs[:model.n_inputs + 1]
            del lstsqargs[:model.n_inputs + 1]
            # separate out each model's individually fitted parameters
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fitparams[:numfp]

            del fitparams[:numfp]
            # recreate the model parameters
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the
                    # parameter is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            modelfit = model.evaluate(margs[:-1], *mparams)
            fitted.extend(modelfit - margs[-1])
        return np.ravel(fitted)

    def _verify_input(self):
        if len(self.models) <= 1:
            raise TypeError(f"Expected >1 models, {len(self.models)} was given")
        if len(self.jointparams.keys()) < 2:
            raise TypeError("At least two parameters are expected, "
                            "{} were given".format(len(self.jointparams.keys())))
        for j in self.jointparams.keys():
            if len(self.jointparams[j]) != len(self.initvals):
                raise TypeError("{} parameter(s) provided but {} expected".format(
                    len(self.jointparams[j]), len(self.initvals)))

    def __call__(self, *args):
        """
        Fit data to these models keeping some of the parameters common to the
        two models.
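
        Examples
        --------
        A minimal, illustrative sketch (the noise-free arrays below stand in
        for measured data) fitting two Gaussians that share a common
        ``stddev``::

            import numpy as np
            from astropy.modeling import models, fitting

            g1 = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
            g2 = models.Gaussian1D(amplitude=2.5, mean=3., stddev=1.)
            jf = fitting.JointFitter(
                [g1, g2],
                jointparameters={g1: ['stddev'], g2: ['stddev']},
                initvals=[1.2])
            x = np.linspace(-5., 8., 200)
            # four arguments: x1, y1, x2, y2; updates g1 and g2 in place
            jf(x, g1(x), x, g2(x))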
""" from scipy import optimize if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims): raise ValueError("Expected {} coordinates in args but {} provided" .format(reduce(lambda x, y: x + 1 + y + 1, self.modeldims), len(args))) self.fitparams[:], _ = optimize.leastsq(self.objective_function, self.fitparams, args=args) fparams = self.fitparams[:] numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fparams[:numjp] del fparams[:numjp] for model in self.models: # extract each model's fitted parameters joint_params = self.jointparams[model] numfp = len(model._parameters) - len(joint_params) mfparams = fparams[:numfp] del fparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the parameter # is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]['slice'] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] model.parameters = np.array(mparams) def _convert_input(x, y, z=None, n_models=1, model_set_axis=0): """Convert inputs to float arrays.""" x = np.asanyarray(x, dtype=float) y = np.asanyarray(y, dtype=float) if z is not None: z = np.asanyarray(z, dtype=float) data_ndim, data_shape = z.ndim, z.shape else: data_ndim, data_shape = y.ndim, y.shape # For compatibility with how the linear fitter code currently expects to # work, shift the dependent variable's axes to the expected locations if n_models > 1 or data_ndim > x.ndim: if (model_set_axis or 0) >= data_ndim: raise ValueError("model_set_axis out of range") if data_shape[model_set_axis] != n_models: raise ValueError( "Number of data sets (y or z array) is expected to equal " "the number of parameter sets" ) if z is None: # For a 1-D model the y coordinate's model-set-axis is expected to # be last, so that its first dimension is the same length as the x # coordinates. This is in line with the expectations of # numpy.linalg.lstsq: # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # That is, each model should be represented by a column. TODO: # Obviously this is a detail of np.linalg.lstsq and should be # handled specifically by any fitters that use it... y = np.rollaxis(y, model_set_axis, y.ndim) data_shape = y.shape[:-1] else: # Shape of z excluding model_set_axis data_shape = (z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]) if z is None: if data_shape != x.shape: raise ValueError("x and y should have the same shape") farg = (x, y) else: if not (x.shape == y.shape == data_shape): raise ValueError("x, y and z should have the same shape") farg = (x, y, z) return farg # TODO: These utility functions are really particular to handling # bounds/tied/fixed constraints for scipy.optimize optimizers that do not # support them inherently; this needs to be reworked to be clear about this # distinction (and the fact that these are not necessarily applicable to any # arbitrary fitter--as evidenced for example by the fact that JointFitter has # its own versions of these) # TODO: Most of this code should be entirely rewritten; it should not be as # inefficient as it is. def fitter_to_model_params(model, fps): """ Constructs the full list of model parameters from the fitted and constrained parameters. 
""" _, fit_param_indices = model_to_fit_params(model) has_tied = any(model.tied.values()) has_fixed = any(model.fixed.values()) has_bound = any(b != (None, None) for b in model.bounds.values()) parameters = model.parameters if not (has_tied or has_fixed or has_bound): # We can just assign directly model.parameters = fps return fit_param_indices = set(fit_param_indices) offset = 0 param_metrics = model._param_metrics for idx, name in enumerate(model.param_names): if idx not in fit_param_indices: continue slice_ = param_metrics[name]['slice'] shape = param_metrics[name]['shape'] # This is determining which range of fps (the fitted parameters) maps # to parameters of the model size = reduce(operator.mul, shape, 1) values = fps[offset:offset + size] # Check bounds constraints if model.bounds[name] != (None, None): _min, _max = model.bounds[name] if _min is not None: values = np.fmax(values, _min) if _max is not None: values = np.fmin(values, _max) parameters[slice_] = values offset += size # Update model parameters before calling ``tied`` constraints. model._array_to_parameters() # This has to be done in a separate loop due to how tied parameters are # currently evaluated (the fitted parameters need to actually be *set* on # the model first, for use in evaluating the "tied" expression--it might be # better to change this at some point if has_tied: for idx, name in enumerate(model.param_names): if model.tied[name]: value = model.tied[name](model) slice_ = param_metrics[name]['slice'] # To handle multiple tied constraints, model parameters # need to be updated after each iteration. parameters[slice_] = value model._array_to_parameters() @deprecated('5.1', 'private method: _fitter_to_model_params has been made public now') def _fitter_to_model_params(model, fps): return fitter_to_model_params(model, fps) def model_to_fit_params(model): """ Convert a model instance's parameter array to an array that can be used with a fitter that doesn't natively support fixed or tied parameters. In particular, it removes fixed/tied parameters from the parameter array. These may be a subset of the model parameters, if some of them are held constant or tied. """ fitparam_indices = list(range(len(model.param_names))) if any(model.fixed.values()) or any(model.tied.values()): params = list(model.parameters) param_metrics = model._param_metrics for idx, name in list(enumerate(model.param_names))[::-1]: if model.fixed[name] or model.tied[name]: slice_ = param_metrics[name]['slice'] del params[slice_] del fitparam_indices[idx] return (np.array(params), fitparam_indices) return (model.parameters, fitparam_indices) @deprecated('5.1', 'private method: _model_to_fit_params has been made public now') def _model_to_fit_params(model): return model_to_fit_params(model) def _validate_constraints(supported_constraints, model): """Make sure model constraints are supported by the current fitter.""" message = 'Optimizer cannot handle {0} constraints.' 
    if (any(model.fixed.values()) and
            'fixed' not in supported_constraints):
        raise UnsupportedConstraintError(
            message.format('fixed parameter'))

    if any(model.tied.values()) and 'tied' not in supported_constraints:
        raise UnsupportedConstraintError(
            message.format('tied parameter'))

    if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
            'bounds' not in supported_constraints):
        raise UnsupportedConstraintError(
            message.format('bound parameter'))

    if model.eqcons and 'eqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('equality'))

    if model.ineqcons and 'ineqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('inequality'))


def _validate_model(model, supported_constraints):
    """
    Check that model and fitter are compatible and return a copy of the model.
    """
    if not model.fittable:
        raise ValueError("Model does not appear to be fittable.")
    if model.linear:
        warnings.warn('Model is linear in parameters; '
                      'consider using linear fitting methods.',
                      AstropyUserWarning)
    elif len(model) != 1:
        # for now only single data sets can be fitted
        raise ValueError("Non-linear fitters can only fit "
                         "one data set at a time.")
    _validate_constraints(supported_constraints, model)

    model_copy = model.copy()
    return model_copy


def populate_entry_points(entry_points):
    """
    This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requirement
    of it being merged into astropy's core.

    Parameters
    ----------
    entry_points : list of `~importlib.metadata.EntryPoint`
        entry_points are objects which encapsulate importable objects and
        are defined on the installation of a package.

    Notes
    -----
    An explanation of entry points can be found `here
    <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
    """
    for entry_point in entry_points:
        name = entry_point.name
        try:
            entry_point = entry_point.load()
        except Exception as e:
            # This stops the fitting from choking if an entry_point produces an error.
            warnings.warn(AstropyUserWarning(
                f'{type(e).__name__} error occurred in entry point {name}.'))
        else:
            if not inspect.isclass(entry_point):
                warnings.warn(AstropyUserWarning(
                    f'Modeling entry point {name} expected to be a Class.'))
            else:
                if issubclass(entry_point, Fitter):
                    name = entry_point.__name__
                    globals()[name] = entry_point
                    __all__.append(name)
                else:
                    warnings.warn(AstropyUserWarning(
                        'Modeling entry point {} expected to extend '
                        'astropy.modeling.Fitter'.format(name)))


def _populate_ep():
    # TODO: Exclusively use select when Python minversion is 3.10
    ep = entry_points()
    if hasattr(ep, 'select'):
        populate_entry_points(ep.select(group='astropy.modeling'))
    else:
        populate_entry_points(ep.get('astropy.modeling', []))


_populate_ep()
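
# A minimal usage sketch of the two most common fitters in this module
# (illustrative only; the arrays below are synthetic stand-ins, not part of
# this module's API):
#
#     import numpy as np
#     from astropy.modeling import models, fitting
#
#     x = np.linspace(-5., 5., 200)
#     y = 3. * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
#
#     # Linear problem (polynomial coefficients): LinearLSQFitter
#     poly = fitting.LinearLSQFitter()(models.Polynomial1D(degree=2), x, y)
#
#     # Non-linear problem (Gaussian parameters): LevMarLSQFitter
#     gauss = fitting.LevMarLSQFitter()(models.Gaussian1D(1., 0., 1.), x, y)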
8d303ef2839341a6e887eb4b0f78c060a51e6d407de103f487adc8f7d57a65e6
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name

"""
This module defines classes that deal with parameters.

It is unlikely users will need to work with these classes directly,
unless they define their own models.
"""

import functools
import numbers
import operator

import numpy as np

from astropy.units import Quantity
from astropy.utils import isiterable

from .utils import array_repr_oneline, get_inputs_and_params

__all__ = ['Parameter', 'InputParameterError', 'ParameterError']


class ParameterError(Exception):
    """Generic exception class for all exceptions pertaining to Parameters."""


class InputParameterError(ValueError, ParameterError):
    """Used for incorrect input parameter values and definitions."""


class ParameterDefinitionError(ParameterError):
    """Exception in declaration of class-level Parameters."""


def _tofloat(value):
    """Convert a parameter to float or float array."""
    if isiterable(value):
        try:
            value = np.asanyarray(value, dtype=float)
        except (TypeError, ValueError):
            # catch arrays with strings or user errors like different
            # types of parameters in a parameter set
            raise InputParameterError(
                f"Parameter of {type(value)} could not be converted to float")
    elif isinstance(value, Quantity):
        # Quantities are fine as is
        pass
    elif isinstance(value, np.ndarray):
        # A scalar/dimensionless array
        value = float(value.item())
    elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
        value = float(value)
    elif isinstance(value, bool):
        raise InputParameterError(
            "Expected parameter to be of numerical type, not boolean")
    else:
        raise InputParameterError(
            f"Don't know how to convert parameter of {type(value)} to float")
    return value


# Helpers for implementing operator overloading on Parameter

def _binary_arithmetic_operation(op, reflected=False):
    @functools.wraps(op)
    def wrapper(self, val):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        if reflected:
            return op(val, self_value)
        else:
            return op(self_value, val)

    return wrapper


def _binary_comparison_operation(op):
    @functools.wraps(op)
    def wrapper(self, val):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        return op(self_value, val)

    return wrapper


def _unary_arithmetic_operation(op):
    @functools.wraps(op)
    def wrapper(self):
        if self.unit is not None:
            self_value = Quantity(self.value, self.unit)
        else:
            self_value = self.value

        return op(self_value)

    return wrapper


class Parameter:
    """
    Wraps individual parameters.

    Since 4.0, Parameters are no longer descriptors and are based on a new
    implementation of the Parameter class. Parameters now (as of 4.0) store
    values locally (instead of, as previously, in the associated model).

    This class represents a model's parameter (in a somewhat broad sense). It
    serves a number of purposes:

    1) A type to be recognized by models and treated specially at class
       initialization (i.e., if it is found that there is a class definition
       of a Parameter, the model initializer makes a copy at the instance
       level).

    2) Managing the handling of allowable parameter values and once defined,
       ensuring updates are consistent with the Parameter definition. This
       includes the optional use of units and quantities as well as
       transforming values to an internally consistent representation (e.g.,
       from degrees to radians through the use of getters and setters).
3) Holding attributes of parameters relevant to fitting, such as whether the parameter may be varied in fitting, or whether there are constraints that must be satisfied. See :ref:`astropy:modeling-parameters` for more details. Parameters ---------- name : str parameter name .. warning:: The fact that `Parameter` accepts ``name`` as an argument is an implementation detail, and should not be used directly. When defining a new `Model` class, parameter names are always automatically defined by the class attribute they're assigned to. description : str parameter description default : float or array default value to use for this parameter unit : `~astropy.units.Unit` if specified, the parameter will be in these units, and when the parameter is updated in future, it should be set to a :class:`~astropy.units.Quantity` that has equivalent units. getter : callable a function that wraps the raw (internal) value of the parameter when returning the value through the parameter proxy (eg. a parameter may be stored internally as radians but returned to the user as degrees) setter : callable a function that wraps any values assigned to this parameter; should be the inverse of getter fixed : bool if True the parameter is not varied during fitting tied : callable or False if callable is supplied it provides a way to link the value of this parameter to another parameter (or some other arbitrary function) min : float the lower bound of a parameter max : float the upper bound of a parameter bounds : tuple specify min and max as a single tuple--bounds may not be specified simultaneously with min or max """ constraints = ('fixed', 'tied', 'bounds') """ Types of constraints a parameter can have. Excludes 'min' and 'max' which are just aliases for the first and second elements of the 'bounds' constraint (which is represented as a 2-tuple). 'prior' and 'posterior' are available for use by user fitters but are not used by any built-in fitters as of this writing. """ def __init__(self, name='', description='', default=None, unit=None, getter=None, setter=None, fixed=False, tied=False, min=None, max=None, bounds=None, prior=None, posterior=None): super().__init__() self._model = None self._model_required = False self._setter = self._create_value_wrapper(setter, None) self._getter = self._create_value_wrapper(getter, None) self._name = name self.__doc__ = self._description = description.strip() # We only need to perform this check on unbound parameters if isinstance(default, Quantity): if unit is not None and not unit.is_equivalent(default.unit): raise ParameterDefinitionError( "parameter default {0} does not have units equivalent to " "the required unit {1}".format(default, unit)) unit = default.unit default = default.value self._default = default self._unit = unit # Internal units correspond to raw_units held by the model in the # previous implementation. The private _getter and _setter methods # use this to convert to and from the public unit defined for the # parameter. 
        self._internal_unit = None

        if not self._model_required:
            if self._default is not None:
                self.value = self._default
            else:
                self._value = None

        # NOTE: These are *default* constraints--on model instances constraints
        # are taken from the model if set, otherwise the defaults set here are
        # used
        if bounds is not None:
            if min is not None or max is not None:
                raise ValueError(
                    'bounds may not be specified simultaneously with min or '
                    'max when instantiating Parameter {}'.format(name))
        else:
            bounds = (min, max)

        self._fixed = fixed
        self._tied = tied
        self._bounds = bounds
        self._order = None

        self._validator = None
        self._prior = prior
        self._posterior = posterior

        self._std = None

    def __set_name__(self, owner, name):
        self._name = name

    def __len__(self):
        val = self.value
        if val.shape == ():
            return 1
        else:
            return val.shape[0]

    def __getitem__(self, key):
        value = self.value
        if len(value.shape) == 0:
            # Wrap the value in a list so that getitem can work for sensible
            # indices like [0] and [-1]
            value = [value]
        return value[key]

    def __setitem__(self, key, value):
        # Get the existing value and check whether it even makes sense to
        # apply this index
        oldvalue = self.value

        if isinstance(key, slice):
            if len(oldvalue[key]) == 0:
                raise InputParameterError(
                    "Slice assignment outside the parameter dimensions for "
                    "'{}'".format(self.name))
            for idx, val in zip(range(*key.indices(len(self))), value):
                self.__setitem__(idx, val)
        else:
            try:
                oldvalue[key] = value
            except IndexError:
                raise InputParameterError(
                    "Input dimension {} invalid for {!r} parameter with "
                    "dimension {}".format(key, self.name, oldvalue.shape[0]))

    def __repr__(self):
        args = f"'{self._name}'"
        args += f', value={self.value}'

        if self.unit is not None:
            args += f', unit={self.unit}'

        for cons in self.constraints:
            val = getattr(self, cons)
            if val not in (None, False, (None, None)):
                # Maybe non-obvious, but False is the default for the fixed and
                # tied constraints
                args += f', {cons}={val}'

        return f"{self.__class__.__name__}({args})"

    @property
    def name(self):
        """Parameter name"""
        return self._name

    @property
    def default(self):
        """Parameter default value"""
        return self._default

    @property
    def value(self):
        """The unadorned value proxied by this parameter."""
        if self._getter is None and self._setter is None:
            return np.float64(self._value)
        else:
            # This new implementation uses the names of internal_unit
            # in place of raw_unit used previously. The contrast between
            # internal values and units is that between the public
            # units that the parameter advertises to what it actually
            # uses internally.
            if self.internal_unit:
                return np.float64(self._getter(self._internal_value,
                                               self.internal_unit,
                                               self.unit).value)
            elif self._getter:
                return np.float64(self._getter(self._internal_value))
            elif self._setter:
                return np.float64(self._internal_value)

    @value.setter
    def value(self, value):
        if isinstance(value, Quantity):
            raise TypeError("The .value property on parameters should be set"
                            " to unitless values, not Quantity objects. To set"
                            " a parameter to a quantity simply set the "
                            "parameter directly without using .value")
        if self._setter is None:
            self._value = np.array(value, dtype=np.float64)
        else:
            self._internal_value = np.array(self._setter(value),
                                            dtype=np.float64)

    @property
    def unit(self):
        """
        The unit attached to this parameter, if any.

        On unbound parameters (i.e. parameters accessed through the
        model class, rather than a model instance) this is the required/
        default unit for the parameter.
""" return self._unit @unit.setter def unit(self, unit): if self.unit is None: raise ValueError('Cannot attach units to parameters that were ' 'not initially specified with units') else: raise ValueError('Cannot change the unit attribute directly, ' 'instead change the parameter to a new quantity') def _set_unit(self, unit, force=False): if force: self._unit = unit else: self.unit = unit @property def internal_unit(self): """ Return the internal unit the parameter uses for the internal value stored """ return self._internal_unit @internal_unit.setter def internal_unit(self, internal_unit): """ Set the unit the parameter will convert the supplied value to the representation used internally. """ self._internal_unit = internal_unit @property def quantity(self): """ This parameter, as a :class:`~astropy.units.Quantity` instance. """ if self.unit is None: return None return self.value * self.unit @quantity.setter def quantity(self, quantity): if not isinstance(quantity, Quantity): raise TypeError("The .quantity attribute should be set " "to a Quantity object") self.value = quantity.value self._unit = quantity.unit @property def shape(self): """The shape of this parameter's value array.""" if self._setter is None: return self._value.shape return self._internal_value.shape @shape.setter def shape(self, value): if isinstance(self.value, np.generic): if value not in ((), (1,)): raise ValueError("Cannot assign this shape to a scalar quantity") else: self.value.shape = value @property def size(self): """The size of this parameter's value array.""" return np.size(self.value) @property def std(self): """Standard deviation, if available from fit.""" return self._std @std.setter def std(self, value): self._std = value @property def prior(self): return self._prior @prior.setter def prior(self, val): self._prior = val @property def posterior(self): return self._posterior @posterior.setter def posterior(self, val): self._posterior = val @property def fixed(self): """ Boolean indicating if the parameter is kept fixed during fitting. """ return self._fixed @fixed.setter def fixed(self, value): """ Fix a parameter. """ if not isinstance(value, bool): raise ValueError("Value must be boolean") self._fixed = value @property def tied(self): """ Indicates that this parameter is linked to another one. A callable which provides the relationship of the two parameters. 
""" return self._tied @tied.setter def tied(self, value): """Tie a parameter""" if not callable(value) and value not in (False, None): raise TypeError("Tied must be a callable or set to False or None") self._tied = value @property def bounds(self): """The minimum and maximum values of a parameter as a tuple""" return self._bounds @bounds.setter def bounds(self, value): """Set the minimum and maximum values of a parameter from a tuple""" _min, _max = value if _min is not None: if not isinstance(_min, (numbers.Number, Quantity)): raise TypeError("Min value must be a number or a Quantity") if isinstance(_min, Quantity): _min = float(_min.value) else: _min = float(_min) if _max is not None: if not isinstance(_max, (numbers.Number, Quantity)): raise TypeError("Max value must be a number or a Quantity") if isinstance(_max, Quantity): _max = float(_max.value) else: _max = float(_max) self._bounds = (_min, _max) @property def min(self): """A value used as a lower bound when fitting a parameter""" return self.bounds[0] @min.setter def min(self, value): """Set a minimum value of a parameter""" self.bounds = (value, self.max) @property def max(self): """A value used as an upper bound when fitting a parameter""" return self.bounds[1] @max.setter def max(self, value): """Set a maximum value of a parameter.""" self.bounds = (self.min, value) @property def validator(self): """ Used as a decorator to set the validator method for a `Parameter`. The validator method validates any value set for that parameter. It takes two arguments--``self``, which refers to the `Model` instance (remember, this is a method defined on a `Model`), and the value being set for this parameter. The validator method's return value is ignored, but it may raise an exception if the value set on the parameter is invalid (typically an `InputParameterError` should be raised, though this is not currently a requirement). """ def validator(func, self=self): if callable(func): self._validator = func return self else: raise ValueError("This decorator method expects a callable.\n" "The use of this method as a direct validator is\n" "deprecated; use the new validate method instead\n") return validator def validate(self, value): """ Run the validator on this parameter""" if self._validator is not None and self._model is not None: self._validator(self._model, value) def copy(self, name=None, description=None, default=None, unit=None, getter=None, setter=None, fixed=False, tied=False, min=None, max=None, bounds=None, prior=None, posterior=None): """ Make a copy of this `Parameter`, overriding any of its core attributes in the process (or an exact copy). The arguments to this method are the same as those for the `Parameter` initializer. This simply returns a new `Parameter` instance with any or all of the attributes overridden, and so returns the equivalent of: .. code:: python Parameter(self.name, self.description, ...) 
""" kwargs = locals().copy() del kwargs['self'] for key, value in kwargs.items(): if value is None: # Annoying special cases for min/max where are just aliases for # the components of bounds if key in ('min', 'max'): continue else: if hasattr(self, key): value = getattr(self, key) elif hasattr(self, '_' + key): value = getattr(self, '_' + key) kwargs[key] = value return self.__class__(**kwargs) @property def model(self): """ Return the model this parameter is associated with.""" return self._model @model.setter def model(self, value): self._model = value self._setter = self._create_value_wrapper(self._setter, value) self._getter = self._create_value_wrapper(self._getter, value) if self._model_required: if self._default is not None: self.value = self._default else: self._value = None @property def _raw_value(self): """ Currently for internal use only. Like Parameter.value but does not pass the result through Parameter.getter. By design this should only be used from bound parameters. This will probably be removed are retweaked at some point in the process of rethinking how parameter values are stored/updated. """ if self._setter: return self._internal_value return self.value def _create_value_wrapper(self, wrapper, model): """Wraps a getter/setter function to support optionally passing in a reference to the model object as the second argument. If a model is tied to this parameter and its getter/setter supports a second argument then this creates a partial function using the model instance as the second argument. """ if isinstance(wrapper, np.ufunc): if wrapper.nin != 1: raise TypeError("A numpy.ufunc used for Parameter " "getter/setter may only take one input " "argument") elif wrapper is None: # Just allow non-wrappers to fall through silently, for convenience return None else: inputs, _ = get_inputs_and_params(wrapper) nargs = len(inputs) if nargs == 1: pass elif nargs == 2: self._model_required = True if model is not None: # Don't make a partial function unless we're tied to a # specific model instance model_arg = inputs[1].name wrapper = functools.partial(wrapper, **{model_arg: model}) else: raise TypeError("Parameter getter/setter must be a function " "of either one or two arguments") return wrapper def __array__(self, dtype=None): # Make np.asarray(self) work a little more straightforwardly arr = np.asarray(self.value, dtype=dtype) if self.unit is not None: arr = Quantity(arr, self.unit, copy=False) return arr def __bool__(self): return bool(np.all(self.value)) __add__ = _binary_arithmetic_operation(operator.add) __radd__ = _binary_arithmetic_operation(operator.add, reflected=True) __sub__ = _binary_arithmetic_operation(operator.sub) __rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True) __mul__ = _binary_arithmetic_operation(operator.mul) __rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True) __pow__ = _binary_arithmetic_operation(operator.pow) __rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True) __truediv__ = _binary_arithmetic_operation(operator.truediv) __rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True) __eq__ = _binary_comparison_operation(operator.eq) __ne__ = _binary_comparison_operation(operator.ne) __lt__ = _binary_comparison_operation(operator.lt) __gt__ = _binary_comparison_operation(operator.gt) __le__ = _binary_comparison_operation(operator.le) __ge__ = _binary_comparison_operation(operator.ge) __neg__ = _unary_arithmetic_operation(operator.neg) __abs__ = _unary_arithmetic_operation(operator.abs) def 
param_repr_oneline(param): """ Like array_repr_oneline but works on `Parameter` objects and supports rendering parameters with units like quantities. """ out = array_repr_oneline(param.value) if param.unit is not None: out = f'{out} {param.unit!s}' return out
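

# A small illustrative usage sketch for ``param_repr_oneline`` (not part of
# the public API; the exact value formatting comes from ``array_repr_oneline``
# and may differ between numpy versions, hence the skipped doctest)::
#
#     >>> import astropy.units as u
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=2 * u.m, stddev=0.5 * u.m)
#     >>> param_repr_oneline(g.mean)   # doctest: +SKIP
#     '2. m'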
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines base classes for all models. The base class of all models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is the base class for all fittable models. Fittable models can be linear or nonlinear in a regression analysis sense. All models provide a `__call__` method which performs the transformation in a purely mathematical way, i.e. the models are unitless. Model instances can represent either a single model, or a "model set" representing multiple copies of the same type of model, but with potentially different values of the parameters in each model making up the set. """ # pylint: disable=invalid-name, protected-access, redefined-outer-name import abc import copy import functools import inspect import itertools import numbers import operator import types from collections import defaultdict, deque from inspect import signature from itertools import chain import numpy as np from astropy.nddata.utils import add_array, extract_array from astropy.table import Table from astropy.units import Quantity, UnitsError, dimensionless_unscaled from astropy.units.utils import quantity_asanyarray from astropy.utils import (IncompatibleShapeError, check_broadcast, find_current_module, indent, isiterable, metadata, sharedmethod) from astropy.utils.codegen import make_function_with_signature from .bounding_box import CompoundBoundingBox, ModelBoundingBox from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline from .utils import (_combine_equivalency_dict, _ConstraintsDict, _SpecialOperatorsDict, combine_labels, get_inputs_and_params, make_binary_operator_eval) __all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel', 'CompoundModel', 'fix_inputs', 'custom_model', 'ModelDefinitionError', 'bind_bounding_box', 'bind_compound_bounding_box'] def _model_oper(oper, **kwargs): """ Returns a function that evaluates a given Python arithmetic operator between two models. The operator should be given as a string, like ``'+'`` or ``'**'``. """ return lambda left, right: CompoundModel(oper, left, right, **kwargs) class ModelDefinitionError(TypeError): """Used for incorrect models definitions.""" class _ModelMeta(abc.ABCMeta): """ Metaclass for Model. Currently just handles auto-generating the param_names list based on Parameter descriptors declared at the class-level of Model subclasses. """ _is_dynamic = False """ This flag signifies whether this class was created in the "normal" way, with a class statement in the body of a module, as opposed to a call to `type` or some other metaclass constructor, such that the resulting class does not belong to a specific module. This is important for pickling of dynamic classes. This flag is always forced to False for new classes, so code that creates dynamic classes should manually set it to True on those classes when creating them. 
""" # Default empty dict for _parameters_, which will be empty on model # classes that don't have any Parameters def __new__(mcls, name, bases, members, **kwds): # See the docstring for _is_dynamic above if '_is_dynamic' not in members: members['_is_dynamic'] = mcls._is_dynamic opermethods = [ ('__add__', _model_oper('+')), ('__sub__', _model_oper('-')), ('__mul__', _model_oper('*')), ('__truediv__', _model_oper('/')), ('__pow__', _model_oper('**')), ('__or__', _model_oper('|')), ('__and__', _model_oper('&')), ('_fix_inputs', _model_oper('fix_inputs')) ] members['_parameters_'] = {k: v for k, v in members.items() if isinstance(v, Parameter)} for opermethod, opercall in opermethods: members[opermethod] = opercall cls = super().__new__(mcls, name, bases, members, **kwds) param_names = list(members['_parameters_']) # Need to walk each base MRO to collect all parameter names for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): # Preserve order of definitions param_names = list(tbase._parameters_) + param_names # Remove duplicates (arising from redefinition in subclass). param_names = list(dict.fromkeys(param_names)) if cls._parameters_: if hasattr(cls, '_param_names'): # Slight kludge to support compound models, where # cls.param_names is a property; could be improved with a # little refactoring but fine for now cls._param_names = tuple(param_names) else: cls.param_names = tuple(param_names) return cls def __init__(cls, name, bases, members, **kwds): super(_ModelMeta, cls).__init__(name, bases, members, **kwds) cls._create_inverse_property(members) cls._create_bounding_box_property(members) pdict = {} for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): for parname, val in cls._parameters_.items(): pdict[parname] = val cls._handle_special_methods(members, pdict) def __repr__(cls): """ Custom repr for Model subclasses. """ return cls._format_cls_repr() def _repr_pretty_(cls, p, cycle): """ Repr for IPython's pretty printer. By default IPython "pretty prints" classes, so we need to implement this so that IPython displays the custom repr for Models. """ p.text(repr(cls)) def __reduce__(cls): if not cls._is_dynamic: # Just return a string specifying where the class can be imported # from return cls.__name__ members = dict(cls.__dict__) # Delete any ABC-related attributes--these will be restored when # the class is reconstructed: for key in list(members): if key.startswith('_abc_'): del members[key] # Delete custom __init__ and __call__ if they exist: for key in ('__init__', '__call__'): if key in members: del members[key] return (type(cls), (cls.__name__, cls.__bases__, members)) @property def name(cls): """ The name of this model class--equivalent to ``cls.__name__``. This attribute is provided for symmetry with the `Model.name` attribute of model instances. """ return cls.__name__ @property def _is_concrete(cls): """ A class-level property that determines whether the class is a concrete implementation of a Model--i.e. it is not some abstract base class or internal implementation detail (i.e. begins with '_'). """ return not (cls.__name__.startswith('_') or inspect.isabstract(cls)) def rename(cls, name=None, inputs=None, outputs=None): """ Creates a copy of this model class with a new name, inputs or outputs. The new class is technically a subclass of the original class, so that instance and type checks will still work. 
        For example::

            >>> from astropy.modeling.models import Rotation2D
            >>> SkyRotation = Rotation2D.rename('SkyRotation')
            >>> SkyRotation
            <class 'astropy.modeling.core.SkyRotation'>
            Name: SkyRotation (Rotation2D)
            N_inputs: 2
            N_outputs: 2
            Fittable parameters: ('angle',)
            >>> issubclass(SkyRotation, Rotation2D)
            True
            >>> r = SkyRotation(90)
            >>> isinstance(r, Rotation2D)
            True
        """
        mod = find_current_module(2)
        if mod:
            modname = mod.__name__
        else:
            modname = '__main__'

        if name is None:
            name = cls.name
        if inputs is None:
            inputs = cls.inputs
        else:
            if not isinstance(inputs, tuple):
                raise TypeError("Expected 'inputs' to be a tuple of strings.")
            elif len(inputs) != len(cls.inputs):
                raise ValueError(f'{cls.name} expects {len(cls.inputs)} inputs')
        if outputs is None:
            outputs = cls.outputs
        else:
            if not isinstance(outputs, tuple):
                raise TypeError("Expected 'outputs' to be a tuple of strings.")
            elif len(outputs) != len(cls.outputs):
                raise ValueError(f'{cls.name} expects {len(cls.outputs)} outputs')
        new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
        new_cls.__module__ = modname
        new_cls.__qualname__ = name

        return new_cls

    def _create_inverse_property(cls, members):
        inverse = members.get('inverse')
        if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the below code from running on
            # the Model base class, which implements the default getter and
            # setter for .inverse
            return

        if isinstance(inverse, property):
            # We allow the @property decorator to be omitted entirely from
            # the class definition, though its use should be encouraged for
            # clarity
            inverse = inverse.fget

        # Store the inverse getter internally, then delete the given .inverse
        # attribute so that cls.inverse resolves to Model.inverse instead
        cls._inverse = inverse
        del cls.inverse

    def _create_bounding_box_property(cls, members):
        """
        Takes any bounding_box defined on a concrete Model subclass (either
        as a fixed tuple or a property or method) and wraps it in the generic
        getter/setter interface for the bounding_box attribute.
        """
        # TODO: Much of this is verbatim from _create_inverse_property--I feel
        # like there could be a way to generify properties that work this way,
        # but for the time being that would probably only confuse things more.
        bounding_box = members.get('bounding_box')
        if bounding_box is None or cls.__bases__[0] is object:
            return

        if isinstance(bounding_box, property):
            bounding_box = bounding_box.fget

        if not callable(bounding_box):
            # See if it's a hard-coded bounding_box (as a sequence) and
            # normalize it
            try:
                bounding_box = ModelBoundingBox.validate(cls, bounding_box,
                                                         _preserve_ignore=True)
            except ValueError as exc:
                raise ModelDefinitionError(exc.args[0])
        else:
            sig = signature(bounding_box)
            # May be a method that only takes 'self' as an argument (like a
            # property, but the @property decorator was forgotten)
            #
            # However, if the method takes additional arguments then this is a
            # parameterized bounding box and should be callable
            if len(sig.parameters) > 1:
                bounding_box = \
                    cls._create_bounding_box_subclass(bounding_box, sig)

        # See the Model.bounding_box getter definition for how this attribute
        # is used
        cls._bounding_box = bounding_box
        del cls.bounding_box

    def _create_bounding_box_subclass(cls, func, sig):
        """
        For Models that take optional arguments for defining their bounding
        box, we create a subclass of ModelBoundingBox with a ``__call__``
        method that supports those additional arguments.
Takes the function's Signature as an argument since that is already computed in _create_bounding_box_property, so no need to duplicate that effort. """ # TODO: Might be convenient if calling the bounding box also # automatically sets the _user_bounding_box. So that # # >>> model.bounding_box(arg=1) # # in addition to returning the computed bbox, also sets it, so that # it's a shortcut for # # >>> model.bounding_box = model.bounding_box(arg=1) # # Not sure if that would be non-obvious / confusing though... def __call__(self, **kwargs): return func(self._model, **kwargs) kwargs = [] for idx, param in enumerate(sig.parameters.values()): if idx == 0: # Presumed to be a 'self' argument continue if param.default is param.empty: raise ModelDefinitionError( 'The bounding_box method for {0} is not correctly ' 'defined: If defined as a method all arguments to that ' 'method (besides self) must be keyword arguments with ' 'default values that can be used to compute a default ' 'bounding box.'.format(cls.name)) kwargs.append((param.name, param.default)) __call__.__signature__ = sig return type(f'{cls.name}ModelBoundingBox', (ModelBoundingBox,), {'__call__': __call__}) def _handle_special_methods(cls, members, pdict): # Handle init creation from inputs def update_wrapper(wrapper, cls): # Set up the new __call__'s metadata attributes as though it were # manually defined in the class definition # A bit like functools.update_wrapper but uses the class instead of # the wrapped function wrapper.__module__ = cls.__module__ wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__ if hasattr(cls, '__qualname__'): wrapper.__qualname__ = f'{cls.__qualname__}.{wrapper.__name__}' if ('__call__' not in members and 'n_inputs' in members and isinstance(members['n_inputs'], int) and members['n_inputs'] > 0): # Don't create a custom __call__ for classes that already have one # explicitly defined (this includes the Model base class, and any # other classes that manually override __call__ def __call__(self, *inputs, **kwargs): """Evaluate this model on the supplied inputs.""" return super(cls, self).__call__(*inputs, **kwargs) # When called, models can take two optional keyword arguments: # # * model_set_axis, which indicates (for multi-dimensional input) # which axis is used to indicate different models # # * equivalencies, a dictionary of equivalencies to be applied to # the input values, where each key should correspond to one of # the inputs. # # The following code creates the __call__ function with these # two keyword arguments. args = ('self',) kwargs = dict([('model_set_axis', None), ('with_bounding_box', False), ('fill_value', np.nan), ('equivalencies', None), ('inputs_map', None)]) new_call = make_function_with_signature( __call__, args, kwargs, varargs='inputs', varkwargs='new_inputs') # The following makes it look like __call__ # was defined in the class update_wrapper(new_call, cls) cls.__call__ = new_call if ('__init__' not in members and not inspect.isabstract(cls) and cls._parameters_): # Build list of all parameters including inherited ones # If *all* the parameters have default values we can make them # keyword arguments; otherwise they must all be positional # arguments if all(p.default is not None for p in pdict.values()): args = ('self',) kwargs = [] for param_name, param_val in pdict.items(): default = param_val.default unit = param_val.unit # If the unit was specified in the parameter but the # default is not a Quantity, attach the unit to the # default. 
if unit is not None: default = Quantity(default, unit, copy=False) kwargs.append((param_name, default)) else: args = ('self',) + tuple(pdict.keys()) kwargs = {} def __init__(self, *params, **kwargs): return super(cls, self).__init__(*params, **kwargs) new_init = make_function_with_signature( __init__, args, kwargs, varkwargs='kwargs') update_wrapper(new_init, cls) cls.__init__ = new_init # *** Arithmetic operators for creating compound models *** __add__ = _model_oper('+') __sub__ = _model_oper('-') __mul__ = _model_oper('*') __truediv__ = _model_oper('/') __pow__ = _model_oper('**') __or__ = _model_oper('|') __and__ = _model_oper('&') _fix_inputs = _model_oper('fix_inputs') # *** Other utilities *** def _format_cls_repr(cls, keywords=[]): """ Internal implementation of ``__repr__``. This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ # For the sake of familiarity start the output with the standard class # __repr__ parts = [super().__repr__()] if not cls._is_concrete: return parts[0] def format_inheritance(cls): bases = [] for base in cls.mro()[1:]: if not issubclass(base, Model): continue elif (inspect.isabstract(base) or base.__name__.startswith('_')): break bases.append(base.name) if bases: return f"{cls.name} ({' -> '.join(bases)})" return cls.name try: default_keywords = [ ('Name', format_inheritance(cls)), ('N_inputs', cls.n_inputs), ('N_outputs', cls.n_outputs), ] if cls.param_names: default_keywords.append(('Fittable parameters', cls.param_names)) for keyword, value in default_keywords + keywords: if value is not None: parts.append(f'{keyword}: {value}') return '\n'.join(parts) except Exception: # If any of the above formatting fails fall back on the basic repr # (this is particularly useful in debugging) return parts[0] class Model(metaclass=_ModelMeta): """ Base class for all models. This is an abstract class and should not be instantiated directly. The following initialization arguments apply to the majority of Model subclasses by default (exceptions include specialized utility models like `~astropy.modeling.mappings.Mapping`). Parametric models take all their parameters as arguments, followed by any of the following optional keyword arguments: Parameters ---------- name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict, optional An optional dict of user-defined metadata to attach to this model. How this is used and interpreted is up to the user or individual use case. n_models : int, optional If given an integer greater than 1, a *model set* is instantiated instead of a single model. This affects how the parameter arguments are interpreted. In this case each parameter must be given as a list or array--elements of this array are taken along the first axis (or ``model_set_axis`` if specified), such that the Nth element is the value of that parameter for the Nth model in the set. See the section on model sets in the documentation for more details. model_set_axis : int, optional This argument only applies when creating a model set (i.e. ``n_models > 1``). It changes how parameter values are interpreted. Normally the first axis of each input parameter array (properly the 0th axis) is taken as the axis corresponding to the model sets. However, any axis of an input array may be taken as this "model set axis". 
        This accepts negative integers as well--for example use
        ``model_set_axis=-1`` if the last (most rapidly changing) axis should
        be associated with the model sets. Also, ``model_set_axis=False`` can
        be used to tell that a given input should be used to evaluate all the
        models in the model set.
    fixed : dict, optional
        Dictionary ``{parameter_name: bool}`` setting the fixed constraint
        for one or more parameters.  `True` means the parameter is held fixed
        during fitting and is prevented from updates once an instance of the
        model has been created.

        Alternatively the `~astropy.modeling.Parameter.fixed` property of a
        parameter may be used to lock or unlock individual parameters.
    tied : dict, optional
        Dictionary ``{parameter_name: callable}`` of parameters which are
        linked to some other parameter. The dictionary values are callables
        providing the linking relationship.

        Alternatively the `~astropy.modeling.Parameter.tied` property of a
        parameter may be used to set the ``tied`` constraint on individual
        parameters.
    bounds : dict, optional
        A dictionary ``{parameter_name: value}`` of lower and upper bounds of
        parameters. Keys are parameter names. Values are a list or a tuple
        of length 2 giving the desired range for the parameter.

        Alternatively the `~astropy.modeling.Parameter.min` and
        `~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
        used to set bounds on individual parameters.
    eqcons : list, optional
        List of functions of length n such that ``eqcons[j](x0, *args) ==
        0.0`` in a successfully optimized problem.
    ineqcons : list, optional
        List of functions of length n such that ``ineqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.

    Examples
    --------
    >>> from astropy.modeling import models
    >>> def tie_center(model):
    ...     mean = 50 * model.stddev
    ...     return mean
    >>> tied_parameters = {'mean': tie_center}

    Specify that ``'mean'`` is a tied parameter in one of two ways:

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
    ...                        tied=tied_parameters)

    or

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
    >>> g1.mean.tied
    False
    >>> g1.mean.tied = tie_center
    >>> g1.mean.tied
    <function tie_center at 0x...>

    Fixed parameters:

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
    ...                        fixed={'stddev': True})
    >>> g1.stddev.fixed
    True

    or

    >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
    >>> g1.stddev.fixed
    False
    >>> g1.stddev.fixed = True
    >>> g1.stddev.fixed
    True
    """

    parameter_constraints = Parameter.constraints
    """
    Primarily for informational purposes, these are the types of constraints
    that can be set on a model's parameters.
    """

    model_constraints = ('eqcons', 'ineqcons')
    """
    Primarily for informational purposes, these are the types of constraints
    that constrain model evaluation.
    """

    param_names = ()
    """
    Names of the parameters that describe models of this type.

    The parameters in this tuple are in the same order they should be passed
    in when initializing a model of a specific type.  Some types of models,
    such as polynomial models, have a different number of parameters depending
    on some other property of the model, such as the degree.

    When defining a custom model class the value of this attribute is
    automatically set by the `~astropy.modeling.Parameter` attributes defined
    in the class body.
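
    For example (an illustrative sketch; any concrete model class works the
    same way):

    >>> from astropy.modeling.models import Gaussian1D
    >>> Gaussian1D.param_names
    ('amplitude', 'mean', 'stddev')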
""" n_inputs = 0 """The number of inputs.""" n_outputs = 0 """ The number of outputs.""" standard_broadcasting = True fittable = False linear = True _separable = None """ A boolean flag to indicate whether a model is separable.""" meta = metadata.MetaData() """A dict-like object to store optional information.""" # By default models either use their own inverse property or have no # inverse at all, but users may also assign a custom inverse to a model, # optionally; in that case it is of course up to the user to determine # whether their inverse is *actually* an inverse to the model they assign # it to. _inverse = None _user_inverse = None _bounding_box = None _user_bounding_box = None _has_inverse_bounding_box = False # Default n_models attribute, so that __len__ is still defined even when a # model hasn't completed initialization yet _n_models = 1 # New classes can set this as a boolean value. # It is converted to a dictionary mapping input name to a boolean value. _input_units_strict = False # Allow dimensionless input (and corresponding output). If this is True, # input values to evaluate will gain the units specified in input_units. If # this is a dictionary then it should map input name to a bool to allow # dimensionless numbers for that input. # Only has an effect if input_units is defined. _input_units_allow_dimensionless = False # Default equivalencies to apply to input values. If set, this should be a # dictionary where each key is a string that corresponds to one of the # model inputs. Only has an effect if input_units is defined. input_units_equivalencies = None # Covariance matrix can be set by fitter if available. # If cov_matrix is available, then std will set as well _cov_matrix = None _stds = None def __init_subclass__(cls, **kwargs): super().__init_subclass__() def __init__(self, *args, meta=None, name=None, **kwargs): super().__init__() self._default_inputs_outputs() if meta is not None: self.meta = meta self._name = name # add parameters to instance level by walking MRO list mro = self.__class__.__mro__ for cls in mro: if issubclass(cls, Model): for parname, val in cls._parameters_.items(): newpar = copy.deepcopy(val) newpar.model = self if parname not in self.__dict__: self.__dict__[parname] = newpar self._initialize_constraints(kwargs) kwargs = self._initialize_setters(kwargs) # Remaining keyword args are either parameter values or invalid # Parameter values must be passed in as keyword arguments in order to # distinguish them self._initialize_parameters(args, kwargs) self._initialize_slices() self._initialize_unit_support() def _default_inputs_outputs(self): if self.n_inputs == 1 and self.n_outputs == 1: self._inputs = ("x",) self._outputs = ("y",) elif self.n_inputs == 2 and self.n_outputs == 1: self._inputs = ("x", "y") self._outputs = ("z",) else: try: self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs)) self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs)) except TypeError: # self.n_inputs and self.n_outputs are properties # This is the case when subclasses of Model do not define # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``. self._inputs = () self._outputs = () def _initialize_setters(self, kwargs): """ This exists to inject defaults for settable properties for models originating from `custom_model`. 
""" if hasattr(self, '_settable_properties'): setters = {name: kwargs.pop(name, default) for name, default in self._settable_properties.items()} for name, value in setters.items(): setattr(self, name, value) return kwargs @property def inputs(self): return self._inputs @inputs.setter def inputs(self, val): if len(val) != self.n_inputs: raise ValueError(f"Expected {self.n_inputs} number of inputs, got {len(val)}.") self._inputs = val self._initialize_unit_support() @property def outputs(self): return self._outputs @outputs.setter def outputs(self, val): if len(val) != self.n_outputs: raise ValueError(f"Expected {self.n_outputs} number of outputs, got {len(val)}.") self._outputs = val @property def n_inputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``inputs`` as class variables is removed. if hasattr(self.__class__, 'n_inputs') and isinstance(self.__class__.n_inputs, property): try: return len(self.__class__.inputs) except TypeError: try: return len(self.inputs) except AttributeError: return 0 return self.__class__.n_inputs @property def n_outputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``outputs`` as class variables is removed. if hasattr(self.__class__, 'n_outputs') and isinstance(self.__class__.n_outputs, property): try: return len(self.__class__.outputs) except TypeError: try: return len(self.outputs) except AttributeError: return 0 return self.__class__.n_outputs def _calculate_separability_matrix(self): """ This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used. """ return NotImplemented def _initialize_unit_support(self): """ Convert self._input_units_strict and self.input_units_allow_dimensionless to dictionaries mapping input name to a boolean value. """ if isinstance(self._input_units_strict, bool): self._input_units_strict = {key: self._input_units_strict for key in self.inputs} if isinstance(self._input_units_allow_dimensionless, bool): self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless for key in self.inputs} @property def input_units_strict(self): """ Enforce strict units on inputs to evaluate. If this is set to True, input values to evaluate will be in the exact units specified by input_units. If the input quantities are convertible to input_units, they are converted. If this is a dictionary then it should map input name to a bool to set strict input units for that parameter. """ val = self._input_units_strict if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def input_units_allow_dimensionless(self): """ Allow dimensionless input (and corresponding output). If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Only has an effect if input_units is defined. """ val = self._input_units_allow_dimensionless if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def uses_quantity(self): """ True if this model has been created with `~astropy.units.Quantity` objects or if there are no parameters. This can be used to determine if this model should be evaluated with `~astropy.units.Quantity` or regular floats. 
""" pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)] return (len(pisq) == 0) or any(pisq) def __repr__(self): return self._format_repr() def __str__(self): return self._format_str() def __len__(self): return self._n_models @staticmethod def _strip_ones(intup): return tuple(item for item in intup if item != 1) def __setattr__(self, attr, value): if isinstance(self, CompoundModel): param_names = self._param_names param_names = self.param_names if param_names is not None and attr in self.param_names: param = self.__dict__[attr] value = _tofloat(value) if param._validator is not None: param._validator(self, value) # check consistency with previous shape and size eshape = self._param_metrics[attr]['shape'] if eshape == (): eshape = (1,) vshape = np.array(value).shape if vshape == (): vshape = (1,) esize = self._param_metrics[attr]['size'] if (np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(eshape)): raise InputParameterError( "Value for parameter {0} does not match shape or size\n" "expected by model ({1}, {2}) vs ({3}, {4})".format( attr, vshape, np.size(value), eshape, esize)) if param.unit is None: if isinstance(value, Quantity): param._unit = value.unit param.value = value.value else: param.value = value else: if not isinstance(value, Quantity): raise UnitsError(f"The '{param.name}' parameter should be given as a" " Quantity because it was originally " "initialized as a Quantity") param._unit = value.unit param.value = value.value else: if attr in ['fittable', 'linear']: self.__dict__[attr] = value else: super().__setattr__(attr, value) def _pre_evaluate(self, *args, **kwargs): """ Model specific input setup that needs to occur prior to model evaluation """ # Broadcast inputs into common size inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs) # Setup actual model evaluation method parameters = self._param_sets(raw=True, units=True) def evaluate(_inputs): return self.evaluate(*chain(_inputs, parameters)) return evaluate, inputs, broadcasted_shapes, kwargs def get_bounding_box(self, with_bbox=True): """ Return the ``bounding_box`` of a model if it exists or ``None`` otherwise. Parameters ---------- with_bbox : The value of the ``with_bounding_box`` keyword argument when calling the model. Default is `True` for usage when looking up the model's ``bounding_box`` without risk of error. """ bbox = None if not isinstance(with_bbox, bool) or with_bbox: try: bbox = self.bounding_box except NotImplementedError: pass if isinstance(bbox, CompoundBoundingBox) and not isinstance(with_bbox, bool): bbox = bbox[with_bbox] return bbox @property def _argnames(self): """The inputs used to determine input_shape for bounding_box evaluation""" return self.inputs def _validate_input_shape(self, _input, idx, argnames, model_set_axis, check_model_set_axis): """ Perform basic validation of a single model input's shape -- it has the minimum dimensions for the given model_set_axis Returns the shape of the input if validation succeeds. 
""" input_shape = np.shape(_input) # Ensure that the input's model_set_axis matches the model's # n_models if input_shape and check_model_set_axis: # Note: Scalar inputs *only* get a pass on this if len(input_shape) < model_set_axis + 1: raise ValueError( f"For model_set_axis={model_set_axis}, all inputs must be at " f"least {model_set_axis + 1}-dimensional.") if input_shape[model_set_axis] != self._n_models: try: argname = argnames[idx] except IndexError: # the case of model.inputs = () argname = str(idx) raise ValueError( f"Input argument '{argname}' does not have the correct " f"dimensions in model_set_axis={model_set_axis} for a model set with " f"n_models={self._n_models}.") return input_shape def _validate_input_shapes(self, inputs, argnames, model_set_axis): """ Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other. """ check_model_set_axis = self._n_models > 1 and model_set_axis is not False all_shapes = [] for idx, _input in enumerate(inputs): all_shapes.append(self._validate_input_shape(_input, idx, argnames, model_set_axis, check_model_set_axis)) input_shape = check_broadcast(*all_shapes) if input_shape is None: raise ValueError( "All inputs must have identical shapes or must be scalars.") return input_shape def input_shape(self, inputs): """Get input shape for bounding_box evaluation""" return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis) def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox): """ Generic model evaluation routine Selects and evaluates model with or without bounding_box enforcement """ # Evaluate the model using the prepared evaluation method either # enforcing the bounding_box or not. bbox = self.get_bounding_box(with_bbox) if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None: outputs = bbox.evaluate(evaluate, _inputs, fill_value) else: outputs = evaluate(_inputs) return outputs def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ Model specific post evaluation processing of outputs """ if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1: outputs = (outputs,) outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs) outputs = self._process_output_units(inputs, outputs) if self.n_outputs == 1: return outputs[0] return outputs @property def bbox_with_units(self): return (not isinstance(self, CompoundModel)) def __call__(self, *args, **kwargs): """ Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated. """ # Turn any keyword arguments into positional arguments. 
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs) # Read model evaluation related parameters with_bbox = kwargs.pop('with_bounding_box', False) fill_value = kwargs.pop('fill_value', np.nan) # prepare for model evaluation (overridden in CompoundModel) evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(*args, **kwargs) outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox) # post-process evaluation results (overridden in CompoundModel) return self._post_evaluate(inputs, outputs, broadcasted_shapes, with_bbox, **kwargs) def _get_renamed_inputs_as_positional(self, *args, **kwargs): def _keyword2positional(kwargs): # Inputs were passed as keyword (not positional) arguments. # Because the signature of the ``__call__`` is defined at # the class level, the name of the inputs cannot be changed at # the instance level and the old names are always present in the # signature of the method. In order to use the new names of the # inputs, the old names are taken out of ``kwargs``, the input # values are sorted in the order of self.inputs and passed as # positional arguments to ``__call__``. # These are the keys that are always present as keyword arguments. keys = ['model_set_axis', 'with_bounding_box', 'fill_value', 'equivalencies', 'inputs_map'] new_inputs = {} # kwargs contain the names of the new inputs + ``keys`` allkeys = list(kwargs.keys()) # Remove the names of the new inputs from kwargs and save them # to a dict ``new_inputs``. for key in allkeys: if key not in keys: new_inputs[key] = kwargs[key] del kwargs[key] return new_inputs, kwargs n_args = len(args) new_inputs, kwargs = _keyword2positional(kwargs) n_all_args = n_args + len(new_inputs) if n_all_args < self.n_inputs: raise ValueError(f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}") elif n_all_args > self.n_inputs: raise ValueError(f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}") if n_args == 0: # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: new_args.append(new_inputs[k]) elif n_args != self.n_inputs: # Some inputs are passed as positional, others as keyword arguments. args = list(args) # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: if k in new_inputs: new_args.append(new_inputs[k]) else: new_args.append(args[0]) del args[0] else: new_args = args return new_args, kwargs # *** Properties *** @property def name(self): """User-provided name for this model instance.""" return self._name @name.setter def name(self, val): """Assign a (new) name to this model.""" self._name = val @property def model_set_axis(self): """ The index of the model set axis--that is the axis of a parameter array that pertains to which model a parameter value pertains to--as specified when the model was initialized. See the documentation on :ref:`astropy:modeling-model-sets` for more details. """ return self._model_set_axis @property def param_sets(self): """ Return parameters as a pset. This is a list with one item per parameter set, which is an array of that parameter's values across all parameter sets, with the last axis associated with the parameter set. """ return self._param_sets() @property def parameters(self): """ A flattened array of all parameter values in all parameter sets. Fittable parameters maintain this list and fitters modify it. 
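
        Examples
        --------
        An illustrative sketch (exact array formatting may vary slightly with
        the numpy version):

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=2, mean=1, stddev=0.5)
        >>> g.parameters
        array([2. , 1. , 0.5])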
""" # Currently the sequence of a model's parameters must be contiguous # within the _parameters array (which may be a view of a larger array, # for example when taking a sub-expression of a compound model), so # the assumption here is reliable: if not self.param_names: # Trivial, but not unheard of return self._parameters self._parameters_to_array() start = self._param_metrics[self.param_names[0]]['slice'].start stop = self._param_metrics[self.param_names[-1]]['slice'].stop return self._parameters[start:stop] @parameters.setter def parameters(self, value): """ Assigning to this attribute updates the parameters array rather than replacing it. """ if not self.param_names: return start = self._param_metrics[self.param_names[0]]['slice'].start stop = self._param_metrics[self.param_names[-1]]['slice'].stop try: value = np.array(value).flatten() self._parameters[start:stop] = value except ValueError as e: raise InputParameterError( "Input parameter values not compatible with the model " "parameters array: {0}".format(e)) self._array_to_parameters() @property def sync_constraints(self): ''' This is a boolean property that indicates whether or not accessing constraints automatically check the constituent models current values. It defaults to True on creation of a model, but for fitting purposes it should be set to False for performance reasons. ''' if not hasattr(self, '_sync_constraints'): self._sync_constraints = True return self._sync_constraints @sync_constraints.setter def sync_constraints(self, value): if not isinstance(value, bool): raise ValueError('sync_constraints only accepts True or False as values') self._sync_constraints = value @property def fixed(self): """ A ``dict`` mapping parameter names to their fixed constraint. """ if not hasattr(self, '_fixed') or self.sync_constraints: self._fixed = _ConstraintsDict(self, 'fixed') return self._fixed @property def bounds(self): """ A ``dict`` mapping parameter names to their upper and lower bounds as ``(min, max)`` tuples or ``[min, max]`` lists. """ if not hasattr(self, '_bounds') or self.sync_constraints: self._bounds = _ConstraintsDict(self, 'bounds') return self._bounds @property def tied(self): """ A ``dict`` mapping parameter names to their tied constraint. """ if not hasattr(self, '_tied') or self.sync_constraints: self._tied = _ConstraintsDict(self, 'tied') return self._tied @property def eqcons(self): """List of parameter equality constraints.""" return self._mconstraints['eqcons'] @property def ineqcons(self): """List of parameter inequality constraints.""" return self._mconstraints['ineqcons'] def has_inverse(self): """ Returns True if the model has an analytic or user inverse defined. """ try: self.inverse except NotImplementedError: return False return True @property def inverse(self): """ Returns a new `~astropy.modeling.Model` instance which performs the inverse transform, if an analytic inverse is defined for this model. Even on models that don't have an inverse defined, this property can be set with a manually-defined inverse, such a pre-computed or experimentally determined inverse (often given as a `~astropy.modeling.polynomial.PolynomialModel`, but not by requirement). A custom inverse can be deleted with ``del model.inverse``. In this case the model's inverse is reset to its default, if a default exists (otherwise the default is to raise `NotImplementedError`). 
Note to authors of `~astropy.modeling.Model` subclasses: To define an inverse for a model simply override this property to return the appropriate model representing the inverse. The machinery that will make the inverse manually-overridable is added automatically by the base class. """ if self._user_inverse is not None: return self._user_inverse elif self._inverse is not None: result = self._inverse() if result is not NotImplemented: if not self._has_inverse_bounding_box: result.bounding_box = None return result raise NotImplementedError("No analytical or user-supplied inverse transform " "has been implemented for this model.") @inverse.setter def inverse(self, value): if not isinstance(value, (Model, type(None))): raise ValueError( "The ``inverse`` attribute may be assigned a `Model` " "instance or `None` (where `None` explicitly forces the " "model to have no inverse.") self._user_inverse = value @inverse.deleter def inverse(self): """ Resets the model's inverse to its default (if one exists, otherwise the model will have no inverse). """ try: del self._user_inverse except AttributeError: pass @property def has_user_inverse(self): """ A flag indicating whether or not a custom inverse model has been assigned to this model by a user, via assignment to ``model.inverse``. """ return self._user_inverse is not None @property def bounding_box(self): r""" A `tuple` of length `n_inputs` defining the bounding box limits, or raise `NotImplementedError` for no bounding_box. The default limits are given by a ``bounding_box`` property or method defined in the class body of a specific model. If not defined then this property just raises `NotImplementedError` by default (but may be assigned a custom value by a user). ``bounding_box`` can be set manually to an array-like object of shape ``(model.n_inputs, 2)``. For further usage, see :ref:`astropy:bounding-boxes` The limits are ordered according to the `numpy` ``'C'`` indexing convention, and are the reverse of the model input order, e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined: * for 1D: ``(x_low, x_high)`` * for 2D: ``((y_low, y_high), (x_low, x_high))`` * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))`` Examples -------- Setting the ``bounding_box`` limits for a 1D and 2D model: >>> from astropy.modeling.models import Gaussian1D, Gaussian2D >>> model_1d = Gaussian1D() >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1) >>> model_1d.bounding_box = (-5, 5) >>> model_2d.bounding_box = ((-6, 6), (-5, 5)) Setting the bounding_box limits for a user-defined 3D `custom_model`: >>> from astropy.modeling.models import custom_model >>> def const3d(x, y, z, amp=1): ... return amp ... >>> Const3D = custom_model(const3d) >>> model_3d = Const3D() >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4)) To reset ``bounding_box`` to its default limits just delete the user-defined value--this will reset it back to the default defined on the class: >>> del model_1d.bounding_box To disable the bounding box entirely (including the default), set ``bounding_box`` to `None`: >>> model_1d.bounding_box = None >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError: No bounding box is defined for this model (note: the bounding box was explicitly disabled for this model; use `del model.bounding_box` to restore the default bounding box, if one is defined for this model). 
""" if self._user_bounding_box is not None: if self._user_bounding_box is NotImplemented: raise NotImplementedError( "No bounding box is defined for this model (note: the " "bounding box was explicitly disabled for this model; " "use `del model.bounding_box` to restore the default " "bounding box, if one is defined for this model).") return self._user_bounding_box elif self._bounding_box is None: raise NotImplementedError( "No bounding box is defined for this model.") elif isinstance(self._bounding_box, ModelBoundingBox): # This typically implies a hard-coded bounding box. This will # probably be rare, but it is an option return self._bounding_box elif isinstance(self._bounding_box, types.MethodType): return ModelBoundingBox.validate(self, self._bounding_box()) else: # The only other allowed possibility is that it's a ModelBoundingBox # subclass, so we call it with its default arguments and return an # instance of it (that can be called to recompute the bounding box # with any optional parameters) # (In other words, in this case self._bounding_box is a *class*) bounding_box = self._bounding_box((), model=self)() return self._bounding_box(bounding_box, model=self) @bounding_box.setter def bounding_box(self, bounding_box): """ Assigns the bounding box limits. """ if bounding_box is None: cls = None # We use this to explicitly set an unimplemented bounding box (as # opposed to no user bounding box defined) bounding_box = NotImplemented elif (isinstance(bounding_box, CompoundBoundingBox) or isinstance(bounding_box, dict)): cls = CompoundBoundingBox elif (isinstance(self._bounding_box, type) and issubclass(self._bounding_box, ModelBoundingBox)): cls = self._bounding_box else: cls = ModelBoundingBox if cls is not None: try: bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True) except ValueError as exc: raise ValueError(exc.args[0]) self._user_bounding_box = bounding_box def set_slice_args(self, *args): if isinstance(self._user_bounding_box, CompoundBoundingBox): self._user_bounding_box.slice_args = args else: raise RuntimeError('The bounding_box for this model is not compound') @bounding_box.deleter def bounding_box(self): self._user_bounding_box = None @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None @property def cov_matrix(self): """ Fitter should set covariance matrix, if available. """ return self._cov_matrix @cov_matrix.setter def cov_matrix(self, cov): self._cov_matrix = cov unfix_untied_params = [p for p in self.param_names if (self.fixed[p] is False) and (self.tied[p] is False)] if type(cov) == list: # model set param_stds = [] for c in cov: param_stds.append([np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]) for p, param_name in enumerate(unfix_untied_params): par = getattr(self, param_name) par.std = [item[p] for item in param_stds] setattr(self, param_name, par) else: param_stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)] for param_name in unfix_untied_params: par = getattr(self, param_name) par.std = param_stds.pop(0) setattr(self, param_name, par) @property def stds(self): """ Standard deviation of parameters, if covariance matrix is available. 
""" return self._stds @stds.setter def stds(self, stds): self._stds = stds @property def separable(self): """ A flag indicating whether a model is separable.""" if self._separable is not None: return self._separable raise NotImplementedError( 'The "separable" property is not defined for ' 'model {}'.format(self.__class__.__name__)) # *** Public methods *** def without_units_for_data(self, **kwargs): """ Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None} outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None} parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit) for name, unit in parameter_units.items(): parameter = getattr(model, name) if parameter.unit is not None: parameter.value = parameter.quantity.to(unit).value parameter._set_unit(None, force=True) if isinstance(model, CompoundModel): model.strip_units_from_tree() return model def output_units(self, **kwargs): """ Return a dictionary of output units for this model given a dictionary of fitting inputs and outputs The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). This method will force extra model evaluations, which maybe computationally expensive. To avoid this, one can add a return_units property to the model, see :ref:`astropy:models_return_units`. """ units = self.return_units if units is None or units == {}: inputs = {inp: kwargs[inp] for inp in self.inputs} values = self(**inputs) if self.n_outputs == 1: values = (values,) units = {out: getattr(values[index], 'unit', dimensionless_unscaled) for index, out in enumerate(self.outputs)} return units def strip_units_from_tree(self): for item in self._leaflist: for parname in item.param_names: par = getattr(item, parname) par._set_unit(None, force=True) def with_units_from_data(self, **kwargs): """ Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). 
The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None} outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None} parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit) # We are adding units to parameters that already have a value, but we # don't want to convert the parameter, just add the unit directly, # hence the call to ``_set_unit``. for name, unit in parameter_units.items(): parameter = getattr(model, name) parameter._set_unit(unit, force=True) return model @property def _has_units(self): # Returns True if any of the parameters have units for param in self.param_names: if getattr(self, param).unit is not None: return True else: return False @property def _supports_unit_fitting(self): # If the model has a ``_parameter_units_for_data_units`` method, this # indicates that we have enough information to strip the units away # and add them back after fitting, when fitting quantities return hasattr(self, '_parameter_units_for_data_units') @abc.abstractmethod def evaluate(self, *args, **kwargs): """Evaluate the model on some input variables.""" def sum_of_implicit_terms(self, *args, **kwargs): """ Evaluate the sum of any implicit model terms on some input variables. This includes any fixed terms used in evaluating a linear model that do not have corresponding parameters exposed to the user. The prototypical case is `astropy.modeling.functional_models.Shift`, which corresponds to a function y = a + bx, where b=1 is intrinsically fixed by the type of model, such that sum_of_implicit_terms(x) == x. This method is needed by linear fitters to correct the dependent variable for the implicit term(s) when solving for the remaining terms (ie. a = y - bx). """ def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ try: bbox = self.bounding_box except NotImplementedError: bbox = None if isinstance(bbox, ModelBoundingBox): bbox = bbox.bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError('If no bounding_box is set, ' 'coords or out must be input.') # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError('inconsistent shape of the output.') else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError('the array and model must have the same ' 'number of dimensions.') if bbox is not None: # Assures position is at center pixel, # important when using add_array. pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]).astype(int).T pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( 'The `bounding_box` is larger than the input out in ' 'one or more dimensions. Set ' '`model.bounding_box = None`.') else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out @property def input_units(self): """ This property is used to indicate what units or sets of units the evaluate method expects, and returns a dictionary mapping inputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid input units, in which case this property should not be overridden since it will return the input units based on the annotations. """ if hasattr(self, '_input_units'): return self._input_units elif hasattr(self.evaluate, '__annotations__'): annotations = self.evaluate.__annotations__.copy() annotations.pop('return', None) if annotations: # If there are not annotations for all inputs this will error. return dict((name, annotations[name]) for name in self.inputs) else: # None means any unit is accepted return None @property def return_units(self): """ This property is used to indicate what units or sets of units the output of evaluate should be in, and returns a dictionary mapping outputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid output units, in which case this property should not be overridden since it will return the return units based on the annotations. """ if hasattr(self, '_return_units'): return self._return_units elif hasattr(self.evaluate, '__annotations__'): return self.evaluate.__annotations__.get('return', None) else: # None means any unit is accepted return None def _prepare_inputs_single_model(self, params, inputs, **kwargs): broadcasts = [] for idx, _input in enumerate(inputs): input_shape = _input.shape # Ensure that array scalars are always upgrade to 1-D arrays for the # sake of consistency with how parameters work. 
They will be cast back
            # to scalars at the end
            if not input_shape:
                inputs[idx] = _input.reshape((1,))

            if not params:
                max_broadcast = input_shape
            else:
                max_broadcast = ()

            for param in params:
                try:
                    if self.standard_broadcasting:
                        broadcast = check_broadcast(input_shape, param.shape)
                    else:
                        broadcast = input_shape
                except IncompatibleShapeError:
                    raise ValueError(
                        "Model input argument {0!r} of shape {1!r} cannot be "
                        "broadcast with parameter {2!r} of shape "
                        "{3!r}.".format(self.inputs[idx], input_shape,
                                        param.name, param.shape))

                if len(broadcast) > len(max_broadcast):
                    max_broadcast = broadcast
                elif len(broadcast) == len(max_broadcast):
                    max_broadcast = max(max_broadcast, broadcast)

            broadcasts.append(max_broadcast)

        if self.n_outputs > self.n_inputs:
            extra_outputs = self.n_outputs - self.n_inputs
            if not broadcasts:
                # If there were no inputs then the broadcasts list is empty
                # just add a None since there is no broadcasting of outputs and
                # inputs necessary (see _prepare_outputs_single_self)
                broadcasts.append(None)
            broadcasts.extend([broadcasts[0]] * extra_outputs)

        return inputs, (broadcasts,)

    @staticmethod
    def _remove_axes_from_shape(shape, axis):
        """
        Given a shape tuple as the first input, construct a new one by
        removing that particular axis from the shape and all preceding
        axes. Negative axis numbers are permitted, where the axis is
        relative to the last axis.
        """
        if len(shape) == 0:
            return shape

        if axis < 0:
            axis = len(shape) + axis
            return shape[:axis] + shape[axis+1:]

        if axis >= len(shape):
            axis = len(shape)-1
        shape = shape[axis+1:]

        return shape

    def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input,
                                  **kwargs):
        reshaped = []
        pivots = []

        model_set_axis_param = self.model_set_axis  # needed to reshape param
        for idx, _input in enumerate(inputs):
            max_param_shape = ()
            if self._n_models > 1 and model_set_axis_input is not False:
                # Use the shape of the input *excluding* the model axis
                input_shape = (_input.shape[:model_set_axis_input] +
                               _input.shape[model_set_axis_input + 1:])
            else:
                input_shape = _input.shape

            for param in params:
                try:
                    check_broadcast(input_shape,
                                    self._remove_axes_from_shape(param.shape,
                                                                 model_set_axis_param))
                except IncompatibleShapeError:
                    raise ValueError(
                        "Model input argument {0!r} of shape {1!r} cannot be "
                        "broadcast with parameter {2!r} of shape "
                        "{3!r}.".format(self.inputs[idx], input_shape,
                                        param.name,
                                        self._remove_axes_from_shape(param.shape,
                                                                     model_set_axis_param)))

                if len(param.shape) - 1 > len(max_param_shape):
                    max_param_shape = self._remove_axes_from_shape(param.shape,
                                                                   model_set_axis_param)

            # We've now determined that, excluding the model_set_axis, the
            # input can broadcast with all the parameters
            input_ndim = len(input_shape)
            if model_set_axis_input is False:
                if len(max_param_shape) > input_ndim:
                    # Just needs to prepend new axes to the input
                    n_new_axes = 1 + len(max_param_shape) - input_ndim
                    new_axes = (1,) * n_new_axes
                    new_shape = new_axes + _input.shape
                    pivot = model_set_axis_param
                else:
                    pivot = input_ndim - len(max_param_shape)
                    new_shape = (_input.shape[:pivot] + (1,) +
                                 _input.shape[pivot:])
                new_input = _input.reshape(new_shape)
            else:
                if len(max_param_shape) >= input_ndim:
                    n_new_axes = len(max_param_shape) - input_ndim
                    pivot = self.model_set_axis
                    new_axes = (1,) * n_new_axes
                    new_shape = (_input.shape[:pivot + 1] + new_axes +
                                 _input.shape[pivot + 1:])
                    new_input = _input.reshape(new_shape)
                else:
                    pivot = _input.ndim - len(max_param_shape) - 1
                    new_input = np.rollaxis(_input, model_set_axis_input,
                                            pivot + 1)
            pivots.append(pivot)
reshaped.append(new_input) if self.n_inputs < self.n_outputs: pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs)) return reshaped, (pivots,) def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None, **kwargs): """ This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there are more than one parameter sets. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method. """ # When we instantiate the model class, we make sure that __call__ can # take the following two keyword arguments: model_set_axis and # equivalencies. if model_set_axis is None: # By default the model_set_axis for the input is assumed to be the # same as that for the parameters the model was defined with # TODO: Ensure that negative model_set_axis arguments are respected model_set_axis = self.model_set_axis params = [getattr(self, name) for name in self.param_names] inputs = [np.asanyarray(_input, dtype=float) for _input in inputs] self._validate_input_shapes(inputs, self.inputs, model_set_axis) inputs_map = kwargs.get('inputs_map', None) inputs = self._validate_input_units(inputs, equivalencies, inputs_map) # The input formatting required for single models versus a multiple # model set are different enough that they've been split into separate # subroutines if self._n_models == 1: return self._prepare_inputs_single_model(params, inputs, **kwargs) else: return self._prepare_inputs_model_set(params, inputs, model_set_axis, **kwargs) def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None): inputs = list(inputs) name = self.name or self.__class__.__name__ # Check that the units are correct, if applicable if self.input_units is not None: # If a leaflist is provided that means this is in the context of # a compound model and it is necessary to create the appropriate # alias for the input coordinate name for the equivalencies dict if inputs_map: edict = {} for mod, mapping in inputs_map: if self is mod: edict[mapping[0]] = equivalencies[mapping[1]] else: edict = equivalencies # We combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict(self.inputs, edict, self.input_units_equivalencies) # We now iterate over the different inputs and make sure that their # units are consistent with those specified in input_units. for i in range(len(inputs)): input_name = self.inputs[i] input_unit = self.input_units.get(input_name, None) if input_unit is None: continue if isinstance(inputs[i], Quantity): # We check for consistency of the units with input_units, # taking into account any equivalencies if inputs[i].unit.is_equivalent( input_unit, equivalencies=input_units_equivalencies[input_name]): # If equivalencies have been specified, we need to # convert the input to the input units - this is # because some equivalencies are non-linear, and # we need to be sure that we evaluate the model in # its own frame of reference. If input_units_strict # is set, we also need to convert to the input units. 
if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]: inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name]) else: # We consider the following two cases separately so as # to be able to raise more appropriate/nicer exceptions if input_unit is dimensionless_unscaled: raise UnitsError("{0}: Units of input '{1}', {2} ({3})," "could not be converted to " "required dimensionless " "input".format(name, self.inputs[i], inputs[i].unit, inputs[i].unit.physical_type)) else: raise UnitsError("{0}: Units of input '{1}', {2} ({3})," " could not be " "converted to required input" " units of {4} ({5})".format( name, self.inputs[i], inputs[i].unit, inputs[i].unit.physical_type, input_unit, input_unit.physical_type)) else: # If we allow dimensionless input, we add the units to the # input values without conversion, otherwise we raise an # exception. if (not self.input_units_allow_dimensionless[input_name] and input_unit is not dimensionless_unscaled and input_unit is not None): if np.any(inputs[i] != 0): raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be " "converted to required input units of " "{2} ({3})".format(name, self.inputs[i], input_unit, input_unit.physical_type)) return inputs def _process_output_units(self, inputs, outputs): inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs]) if self.return_units and inputs_are_quantity: # We allow a non-iterable unit only if there is one output if self.n_outputs == 1 and not isiterable(self.return_units): return_units = {self.outputs[0]: self.return_units} else: return_units = self.return_units outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True) for out, out_name in zip(outputs, self.outputs)]) return outputs @staticmethod def _prepare_output_single_model(output, broadcast_shape): if broadcast_shape is not None: if not broadcast_shape: return output.item() else: try: return output.reshape(broadcast_shape) except ValueError: try: return output.item() except ValueError: return output return output def _prepare_outputs_single_model(self, outputs, broadcasted_shapes): outputs = list(outputs) for idx, output in enumerate(outputs): try: broadcast_shape = check_broadcast(*broadcasted_shapes[0]) except (IndexError, TypeError): broadcast_shape = broadcasted_shapes[0][idx] outputs[idx] = self._prepare_output_single_model(output, broadcast_shape) return tuple(outputs) def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis): pivots = broadcasted_shapes[0] # If model_set_axis = False was passed then use # self._model_set_axis to format the output. if model_set_axis is None or model_set_axis is False: model_set_axis = self.model_set_axis outputs = list(outputs) for idx, output in enumerate(outputs): pivot = pivots[idx] if pivot < output.ndim and pivot != model_set_axis: outputs[idx] = np.rollaxis(output, pivot, model_set_axis) return tuple(outputs) def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs): model_set_axis = kwargs.get('model_set_axis', None) if len(self) == 1: return self._prepare_outputs_single_model(outputs, broadcasted_shapes) else: return self._prepare_outputs_model_set(outputs, broadcasted_shapes, model_set_axis) def copy(self): """ Return a copy of this model. Uses a deep copy so that all model attributes, including parameter values, are copied as well. """ return copy.deepcopy(self) def deepcopy(self): """ Return a deep copy of this model. 
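
        This is equivalent to :meth:`copy`, which already performs a deep
        copy of all model attributes, including parameter values.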
""" return self.copy() @sharedmethod def rename(self, name): """ Return a copy of this model with a new name. """ new_model = self.copy() new_model._name = name return new_model def coerce_units( self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False ): """ Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ from .mappings import UnitsMapping result = self if input_units is not None: if self.input_units is not None: model_units = self.input_units else: model_units = {} for unit in [model_units.get(i) for i in self.inputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError("Cannot specify input_units for model with existing input units") if isinstance(input_units, dict): if input_units.keys() != set(self.inputs): message = ( f"""input_units keys ({", ".join(input_units.keys())}) """ f"""do not match model inputs ({", ".join(self.inputs)})""" ) raise ValueError(message) input_units = [input_units[i] for i in self.inputs] if len(input_units) != self.n_inputs: message = ( "input_units length does not match n_inputs: " f"expected {self.n_inputs}, received {len(input_units)}" ) raise ValueError(message) mapping = tuple((unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)) input_mapping = UnitsMapping( mapping, input_units_equivalencies=input_units_equivalencies, input_units_allow_dimensionless=input_units_allow_dimensionless ) input_mapping.inputs = self.inputs input_mapping.outputs = self.inputs result = input_mapping | result if return_units is not None: if self.return_units is not None: model_units = self.return_units else: model_units = {} for unit in [model_units.get(i) for i in self.outputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError("Cannot specify return_units for model with existing output units") if isinstance(return_units, dict): if return_units.keys() != set(self.outputs): message = ( f"""return_units keys ({", ".join(return_units.keys())}) """ f"""do not match model outputs ({", ".join(self.outputs)})""" ) raise ValueError(message) return_units = [return_units[i] for i in self.outputs] if len(return_units) != self.n_outputs: message = ( "return_units length does not match n_outputs: " f"expected {self.n_outputs}, received {len(return_units)}" ) raise ValueError(message) mapping = tuple((model_units.get(i), unit) for i, unit in zip(self.outputs, return_units)) return_mapping = UnitsMapping(mapping) return_mapping.inputs = self.outputs return_mapping.outputs = self.outputs result = result | return_mapping return result @property def n_submodels(self): """ Return the number of components in a single model, which is obviously 1. """ return 1 def _initialize_constraints(self, kwargs): """ Pop parameter constraint values off the keyword arguments passed to `Model.__init__` and store them in private instance attributes. """ # Pop any constraints off the keyword arguments for constraint in self.parameter_constraints: values = kwargs.pop(constraint, {}) for ckey, cvalue in values.items(): param = getattr(self, ckey) setattr(param, constraint, cvalue) self._mconstraints = {} for constraint in self.model_constraints: values = kwargs.pop(constraint, []) self._mconstraints[constraint] = values def _initialize_parameters(self, args, kwargs): """ Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array. 
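
        For example, a set of two models can be constructed by passing
        parameter values with a leading model axis (an illustrative
        sketch)::

            Gaussian1D([1., 2.], [0., 0.], [0.1, 0.2], n_models=2)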
""" n_models = kwargs.pop('n_models', None) if not (n_models is None or (isinstance(n_models, (int, np.integer)) and n_models >= 1)): raise ValueError( "n_models must be either None (in which case it is " "determined from the model_set_axis of the parameter initial " "values) or it must be a positive integer " "(got {0!r})".format(n_models)) model_set_axis = kwargs.pop('model_set_axis', None) if model_set_axis is None: if n_models is not None and n_models > 1: # Default to zero model_set_axis = 0 else: # Otherwise disable model_set_axis = False else: if not (model_set_axis is False or np.issubdtype(type(model_set_axis), np.integer)): raise ValueError( "model_set_axis must be either False or an integer " "specifying the parameter array axis to map to each " "model in a set of models (got {0!r}).".format( model_set_axis)) # Process positional arguments by matching them up with the # corresponding parameters in self.param_names--if any also appear as # keyword arguments this presents a conflict params = set() if len(args) > len(self.param_names): raise TypeError( "{0}.__init__() takes at most {1} positional arguments ({2} " "given)".format(self.__class__.__name__, len(self.param_names), len(args))) self._model_set_axis = model_set_axis self._param_metrics = defaultdict(dict) for idx, arg in enumerate(args): if arg is None: # A value of None implies using the default value, if exists continue # We use quantity_asanyarray here instead of np.asanyarray because # if any of the arguments are quantities, we need to return a # Quantity object not a plain Numpy array. param_name = self.param_names[idx] params.add(param_name) if not isinstance(arg, Parameter): value = quantity_asanyarray(arg, dtype=float) else: value = arg self._initialize_parameter_value(param_name, value) # At this point the only remaining keyword arguments should be # parameter names; any others are in error. for param_name in self.param_names: if param_name in kwargs: if param_name in params: raise TypeError( "{0}.__init__() got multiple values for parameter " "{1!r}".format(self.__class__.__name__, param_name)) value = kwargs.pop(param_name) if value is None: continue # We use quantity_asanyarray here instead of np.asanyarray # because if any of the arguments are quantities, we need # to return a Quantity object not a plain Numpy array. 
value = quantity_asanyarray(value, dtype=float) params.add(param_name) self._initialize_parameter_value(param_name, value) # Now deal with case where param_name is not supplied by args or kwargs for param_name in self.param_names: if param_name not in params: self._initialize_parameter_value(param_name, None) if kwargs: # If any keyword arguments were left over at this point they are # invalid--the base class should only be passed the parameter # values, constraints, and param_dim for kwarg in kwargs: # Just raise an error on the first unrecognized argument raise TypeError( '{0}.__init__() got an unrecognized parameter ' '{1!r}'.format(self.__class__.__name__, kwarg)) # Determine the number of model sets: If the model_set_axis is # None then there is just one parameter set; otherwise it is determined # by the size of that axis on the first parameter--if the other # parameters don't have the right number of axes or the sizes of their # model_set_axis don't match an error is raised if model_set_axis is not False and n_models != 1 and params: max_ndim = 0 if model_set_axis < 0: min_ndim = abs(model_set_axis) else: min_ndim = model_set_axis + 1 for name in self.param_names: value = getattr(self, name) param_ndim = np.ndim(value) if param_ndim < min_ndim: raise InputParameterError( "All parameter values must be arrays of dimension " "at least {0} for model_set_axis={1} (the value " "given for {2!r} is only {3}-dimensional)".format( min_ndim, model_set_axis, name, param_ndim)) max_ndim = max(max_ndim, param_ndim) if n_models is None: # Use the dimensions of the first parameter to determine # the number of model sets n_models = value.shape[model_set_axis] elif value.shape[model_set_axis] != n_models: raise InputParameterError( "Inconsistent dimensions for parameter {0!r} for " "{1} model sets. 
The length of axis {2} must be the " "same for all input parameter values".format( name, n_models, model_set_axis)) self._check_param_broadcast(max_ndim) else: if n_models is None: n_models = 1 self._check_param_broadcast(None) self._n_models = n_models # now validate parameters for name in params: param = getattr(self, name) if param._validator is not None: param._validator(self, param.value) def _initialize_parameter_value(self, param_name, value): """Mostly deals with consistency checks and determining unit issues.""" if isinstance(value, Parameter): self.__dict__[param_name] = value return param = getattr(self, param_name) # Use default if value is not provided if value is None: default = param.default if default is None: # No value was supplied for the parameter and the # parameter does not have a default, therefore the model # is underspecified raise TypeError("{0}.__init__() requires a value for parameter " "{1!r}".format(self.__class__.__name__, param_name)) value = default unit = param.unit else: if isinstance(value, Quantity): unit = value.unit value = value.value else: unit = None if unit is None and param.unit is not None: raise InputParameterError( "{0}.__init__() requires a Quantity for parameter " "{1!r}".format(self.__class__.__name__, param_name)) param._unit = unit param.internal_unit = None if param._setter is not None: if unit is not None: _val = param._setter(value * unit) else: _val = param._setter(value) if isinstance(_val, Quantity): param.internal_unit = _val.unit param._internal_value = np.array(_val.value) else: param.internal_unit = None param._internal_value = np.array(_val) else: param._value = np.array(value) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name]['slice'] = param_slice param_metrics[name]['shape'] = param_shape param_metrics[name]['size'] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) def _parameters_to_array(self): # Now set the parameter values (this will also fill # self._parameters) param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = param.value if not isinstance(value, np.ndarray): value = np.array([value]) self._parameters[param_metrics[name]['slice']] = value.ravel() # Finally validate all the parameters; we do this last so that # validators that depend on one of the other parameters' values will # work def _array_to_parameters(self): param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = self._parameters[param_metrics[name]['slice']] value.shape = param_metrics[name]['shape'] param.value = value def _check_param_broadcast(self, max_ndim): """ This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets. 
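
        For example, with ``max_ndim = 3`` and ``model_set_axis = 0``, a
        parameter of shape ``(2, 2)`` is recorded with the broadcast shape
        ``(2, 1, 2)``, while a parameter of shape ``(2,)`` becomes ``(2, 1)``.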
""" all_shapes = [] model_set_axis = self._model_set_axis for name in self.param_names: param = getattr(self, name) value = param.value param_shape = np.shape(value) param_ndim = len(param_shape) if max_ndim is not None and param_ndim < max_ndim: # All arrays have the same number of dimensions up to the # model_set_axis dimension, but after that they may have a # different number of trailing axes. The number of trailing # axes must be extended for mutual compatibility. For example # if max_ndim = 3 and model_set_axis = 0, an array with the # shape (2, 2) must be extended to (2, 1, 2). However, an # array with shape (2,) is extended to (2, 1). new_axes = (1,) * (max_ndim - param_ndim) if model_set_axis < 0: # Just need to prepend axes to make up the difference broadcast_shape = new_axes + param_shape else: broadcast_shape = (param_shape[:model_set_axis + 1] + new_axes + param_shape[model_set_axis + 1:]) self._param_metrics[name]['broadcast_shape'] = broadcast_shape all_shapes.append(broadcast_shape) else: all_shapes.append(param_shape) # Now check mutual broadcastability of all shapes try: check_broadcast(*all_shapes) except IncompatibleShapeError as exc: shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args param_a = self.param_names[shape_a_idx] param_b = self.param_names[shape_b_idx] raise InputParameterError( "Parameter {0!r} of shape {1!r} cannot be broadcast with " "parameter {2!r} of shape {3!r}. All parameter arrays " "must have shapes that are mutually compatible according " "to the broadcasting rules.".format(param_a, shape_a, param_b, shape_b)) def _param_sets(self, raw=False, units=False): """ Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ._parameters array, as opposed to the values displayed to users. In most cases these are one in the same but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future. """ values = [] shapes = [] for name in self.param_names: param = getattr(self, name) if raw and param._setter: value = param._internal_value else: value = param.value broadcast_shape = self._param_metrics[name].get('broadcast_shape') if broadcast_shape is not None: value = value.reshape(broadcast_shape) shapes.append(np.shape(value)) if len(self) == 1: # Add a single param set axis to the parameter's value (thus # converting scalars to shape (1,) array values) for # consistency value = np.array([value]) if units: if raw and param.internal_unit is not None: unit = param.internal_unit else: unit = param.unit if unit is not None: value = Quantity(value, unit) values.append(value) if len(set(shapes)) != 1 or units: # If the parameters are not all the same shape, converting to an # array is going to produce an object array # However the way Numpy creates object arrays is tricky in that it # will recurse into array objects in the list and break them up # into separate objects. Doing things this way ensures a 1-D # object array the elements of which are the individual parameter # arrays. There's not much reason to do this over returning a list # except for consistency psets = np.empty(len(values), dtype=object) psets[:] = values return psets return np.array(values) def _format_repr(self, args=[], kwargs={}, defaults={}): """ Internal implementation of ``__repr__``. 
This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ parts = [repr(a) for a in args] parts.extend( f"{name}={param_repr_oneline(getattr(self, name))}" for name in self.param_names) if self.name is not None: parts.append(f'name={self.name!r}') for kwarg, value in kwargs.items(): if kwarg in defaults and defaults[kwarg] == value: continue parts.append(f'{kwarg}={value!r}') if len(self) > 1: parts.append(f"n_models={len(self)}") return f"<{self.__class__.__name__}({', '.join(parts)})>" def _format_str(self, keywords=[], defaults={}): """ Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting. """ default_keywords = [ ('Model', self.__class__.__name__), ('Name', self.name), ('Inputs', self.inputs), ('Outputs', self.outputs), ('Model set size', len(self)) ] parts = [f'{keyword}: {value}' for keyword, value in default_keywords if value is not None] for keyword, value in keywords: if keyword.lower() in defaults and defaults[keyword.lower()] == value: continue parts.append(f'{keyword}: {value}') parts.append('Parameters:') if len(self) == 1: columns = [[getattr(self, name).value] for name in self.param_names] else: columns = [getattr(self, name).value for name in self.param_names] if columns: param_table = Table(columns, names=self.param_names) # Set units on the columns for name in self.param_names: param_table[name].unit = getattr(self, name).unit parts.append(indent(str(param_table), width=4)) return '\n'.join(parts) class FittableModel(Model): """ Base class for models that can be fitted using the built-in fitting algorithms. """ linear = False # derivative with respect to parameters fit_deriv = None """ Function (similar to the model's `~Model.evaluate`) to compute the derivatives of the model with respect to its parameters, for use by fitting algorithms. In other words, this computes the Jacobian matrix with respect to the model's parameters. """ # Flag that indicates if the model derivatives with respect to parameters # are given in columns or rows col_fit_deriv = True fittable = True class Fittable1DModel(FittableModel): """ Base class for one-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. """ n_inputs = 1 n_outputs = 1 _separable = True class Fittable2DModel(FittableModel): """ Base class for two-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. 
""" n_inputs = 2 n_outputs = 1 def _make_arithmetic_operator(oper): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g def op(f, g): return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2]) return op def _composition_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2]) def _join_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: (f[0](inputs[:f[1]], params) + g[0](inputs[f[1]:], params)), f[1] + g[1], f[2] + g[2]) BINARY_OPERATORS = { '+': _make_arithmetic_operator(operator.add), '-': _make_arithmetic_operator(operator.sub), '*': _make_arithmetic_operator(operator.mul), '/': _make_arithmetic_operator(operator.truediv), '**': _make_arithmetic_operator(operator.pow), '|': _composition_operator, '&': _join_operator } SPECIAL_OPERATORS = _SpecialOperatorsDict() def _add_special_operator(sop_name, sop): return SPECIAL_OPERATORS.add(sop_name, sop) class CompoundModel(Model): ''' Base class for compound models. While it can be used directly, the recommended way to combine models is through the model operators. ''' def __init__(self, op, left, right, name=None): self.__dict__['_param_names'] = None self._n_submodels = None self.op = op self.left = left self.right = right self._bounding_box = None self._user_bounding_box = None self._leaflist = None self._tdict = None self._parameters = None self._parameters_ = None self._param_metrics = None if op != 'fix_inputs' and len(left) != len(right): raise ValueError( 'Both operands must have equal values for n_models') self._n_models = len(left) if op != 'fix_inputs' and ((left.model_set_axis != right.model_set_axis) or left.model_set_axis): # not False and not 0 raise ValueError("model_set_axis must be False or 0 and consistent for operands") self._model_set_axis = left.model_set_axis if op in ['+', '-', '*', '/', '**'] or op in SPECIAL_OPERATORS: if (left.n_inputs != right.n_inputs) or \ (left.n_outputs != right.n_outputs): raise ModelDefinitionError( 'Both operands must match numbers of inputs and outputs') self.n_inputs = left.n_inputs self.n_outputs = left.n_outputs self.inputs = left.inputs self.outputs = left.outputs elif op == '&': self.n_inputs = left.n_inputs + right.n_inputs self.n_outputs = left.n_outputs + right.n_outputs self.inputs = combine_labels(left.inputs, right.inputs) self.outputs = combine_labels(left.outputs, right.outputs) elif op == '|': if left.n_outputs != right.n_inputs: raise ModelDefinitionError( "Unsupported operands for |: {0} (n_inputs={1}, " "n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); " "n_outputs for the left-hand model must match n_inputs " "for the right-hand model.".format( left.name, left.n_inputs, left.n_outputs, right.name, right.n_inputs, right.n_outputs)) self.n_inputs = left.n_inputs self.n_outputs = right.n_outputs self.inputs = left.inputs self.outputs = right.outputs elif op == 'fix_inputs': if not isinstance(left, Model): raise ValueError('First argument to "fix_inputs" must be an instance of an astropy Model.') if not isinstance(right, dict): raise ValueError('Expected a dictionary for second argument of 
"fix_inputs".') # Dict keys must match either possible indices # for model on left side, or names for inputs. self.n_inputs = left.n_inputs - len(right) # Assign directly to the private attribute (instead of using the setter) # to avoid asserting the new number of outputs matches the old one. self._outputs = left.outputs self.n_outputs = left.n_outputs newinputs = list(left.inputs) keys = right.keys() input_ind = [] for key in keys: if np.issubdtype(type(key), np.integer): if key >= left.n_inputs or key < 0: raise ValueError( 'Substitution key integer value ' 'not among possible input choices.') if key in input_ind: raise ValueError("Duplicate specification of " "same input (index/name).") input_ind.append(key) elif isinstance(key, str): if key not in left.inputs: raise ValueError( 'Substitution key string not among possible ' 'input choices.') # Check to see it doesn't match positional # specification. ind = left.inputs.index(key) if ind in input_ind: raise ValueError("Duplicate specification of " "same input (index/name).") input_ind.append(ind) # Remove substituted inputs input_ind.sort() input_ind.reverse() for ind in input_ind: del newinputs[ind] self.inputs = tuple(newinputs) # Now check to see if the input model has bounding_box defined. # If so, remove the appropriate dimensions and set it for this # instance. try: self.bounding_box = \ self.left.bounding_box.fix_inputs(self, right) except NotImplementedError: pass else: raise ModelDefinitionError('Illegal operator: ', self.op) self.name = name self._fittable = None self.fit_deriv = None self.col_fit_deriv = None if op in ('|', '+', '-'): self.linear = left.linear and right.linear else: self.linear = False self.eqcons = [] self.ineqcons = [] self.n_left_params = len(self.left.parameters) self._map_parameters() def _get_left_inputs_from_args(self, args): return args[:self.left.n_inputs] def _get_right_inputs_from_args(self, args): op = self.op if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs: self.left.n_inputs + self.right.n_inputs] elif op == '|' or op == 'fix_inputs': return None else: return args[:self.left.n_inputs] def _get_left_params_from_args(self, args): op = self.op if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) n_inputs = self.left.n_inputs + self.right.n_inputs return args[n_inputs: n_inputs + self.n_left_params] else: return args[self.left.n_inputs: self.left.n_inputs + self.n_left_params] def _get_right_params_from_args(self, args): op = self.op if op == 'fix_inputs': return None if op == '&': # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params:] else: return args[self.left.n_inputs + self.n_left_params:] def _get_kwarg_model_parameters_as_positional(self, args, kwargs): # could do it with inserts but rebuilding seems like simpilist way #TODO: Check if any param names are in kwargs maybe as an intersection of sets? 
if self.op == "&": new_args = list(args[:self.left.n_inputs + self.right.n_inputs]) args_pos = self.left.n_inputs + self.right.n_inputs else: new_args = list(args[:self.left.n_inputs]) args_pos = self.left.n_inputs for param_name in self.param_names: kw_value = kwargs.pop(param_name, None) if kw_value is not None: value = kw_value else: try: value = args[args_pos] except IndexError: raise IndexError("Missing parameter or input") args_pos += 1 new_args.append(value) return new_args, kwargs def _apply_operators_to_value_lists(self, leftval, rightval, **kw): op = self.op if op == '+': return binary_operation(operator.add, leftval, rightval) elif op == '-': return binary_operation(operator.sub, leftval, rightval) elif op == '*': return binary_operation(operator.mul, leftval, rightval) elif op == '/': return binary_operation(operator.truediv, leftval, rightval) elif op == '**': return binary_operation(operator.pow, leftval, rightval) elif op == '&': if not isinstance(leftval, tuple): leftval = (leftval,) if not isinstance(rightval, tuple): rightval = (rightval,) return leftval + rightval elif op in SPECIAL_OPERATORS: return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval) else: raise ModelDefinitionError('Unrecognized operator {op}') def evaluate(self, *args, **kw): op = self.op args, kw = self._get_kwarg_model_parameters_as_positional(args, kw) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) if op == 'fix_inputs': pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs))) fixed_inputs = { key if np.issubdtype(type(key), np.integer) else pos_index[key]: value for key, value in self.right.items() } left_inputs = [ fixed_inputs[ind] if ind in fixed_inputs.keys() else inp for ind, inp in enumerate(left_inputs) ] leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params)) if op == 'fix_inputs': return leftval right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) if op == "|": if isinstance(leftval, tuple): return self.right.evaluate(*itertools.chain(leftval, right_params)) else: return self.right.evaluate(leftval, *right_params) else: rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params)) return self._apply_operators_to_value_lists(leftval, rightval, **kw) @property def n_submodels(self): if self._leaflist is None: self._make_leaflist() return len(self._leaflist) @property def submodel_names(self): """ Return the names of submodels in a ``CompoundModel``.""" if self._leaflist is None: self._make_leaflist() names = [item.name for item in self._leaflist] nonecount = 0 newnames = [] for item in names: if item is None: newnames.append(f'None_{nonecount}') nonecount += 1 else: newnames.append(item) return tuple(newnames) def both_inverses_exist(self): ''' if both members of this compound model have inverses return True ''' warnings.warn( "CompoundModel.both_inverses_exist is deprecated. " "Use has_inverse instead.", AstropyDeprecationWarning ) try: linv = self.left.inverse rinv = self.right.inverse except NotImplementedError: return False return True def _pre_evaluate(self, *args, **kwargs): """ CompoundModel specific input setup that needs to occur prior to model evaluation. Note ---- All of the _pre_evaluate for each component model will be performed at the time that the individual model is evaluated. 
""" # If equivalencies are provided, necessary to map parameters and pass # the leaflist as a keyword input for use by model evaluation so that # the compound model input names can be matched to the model input # names. if 'equivalencies' in kwargs: # Restructure to be useful for the individual model lookup kwargs['inputs_map'] = [(value[0], (value[1], key)) for key, value in self.inputs_map().items()] # Setup actual model evaluation method def evaluate(_inputs): return self._evaluate(*_inputs, **kwargs) return evaluate, args, None, kwargs @property def _argnames(self): """No inputs should be used to determine input_shape when handling compound models""" return () def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ CompoundModel specific post evaluation processing of outputs Note ---- All of the _post_evaluate for each component model will be performed at the time that the individual model is evaluated. """ if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1: return outputs[0] return outputs def _evaluate(self, *args, **kw): op = self.op if op != 'fix_inputs': if op != '&': leftval = self.left(*args, **kw) if op != '|': rightval = self.right(*args, **kw) else: rightval = None else: leftval = self.left(*(args[:self.left.n_inputs]), **kw) rightval = self.right(*(args[self.left.n_inputs:]), **kw) if op != "|": return self._apply_operators_to_value_lists(leftval, rightval, **kw) elif op == '|': if isinstance(leftval, tuple): return self.right(*leftval, **kw) else: return self.right(leftval, **kw) else: subs = self.right newargs = list(args) subinds = [] subvals = [] for key in subs.keys(): if np.issubdtype(type(key), np.integer): subinds.append(key) elif isinstance(key, str): ind = self.left.inputs.index(key) subinds.append(ind) subvals.append(subs[key]) # Turn inputs specified in kw into positional indices. # Names for compound inputs do not propagate to sub models. kwind = [] kwval = [] for kwkey in list(kw.keys()): if kwkey in self.inputs: ind = self.inputs.index(kwkey) if ind < len(args): raise ValueError("Keyword argument duplicates " "positional value supplied.") kwind.append(ind) kwval.append(kw[kwkey]) del kw[kwkey] # Build new argument list # Append keyword specified args first if kwind: kwargs = list(zip(kwind, kwval)) kwargs.sort() kwindsorted, kwvalsorted = list(zip(*kwargs)) newargs = newargs + list(kwvalsorted) if subinds: subargs = list(zip(subinds, subvals)) subargs.sort() # subindsorted, subvalsorted = list(zip(*subargs)) # The substitutions must be inserted in order for ind, val in subargs: newargs.insert(ind, val) return self.left(*newargs, **kw) @property def param_names(self): """ An ordered list of parameter names.""" return self._param_names def _make_leaflist(self): tdict = {} leaflist = [] make_subtree_dict(self, '', tdict, leaflist) self._leaflist = leaflist self._tdict = tdict def __getattr__(self, name): """ If someone accesses an attribute not already defined, map the parameters, and then see if the requested attribute is one of the parameters """ # The following test is needed to avoid infinite recursion # caused by deepcopy. There may be other such cases discovered. 
        if name == '__setstate__':
            raise AttributeError
        if name in self._param_names:
            return self.__dict__[name]
        else:
            raise AttributeError(f'Attribute "{name}" not found')

    def __getitem__(self, index):
        if self._leaflist is None:
            self._make_leaflist()
        leaflist = self._leaflist
        tdict = self._tdict
        if isinstance(index, slice):
            if index.step:
                raise ValueError('Steps in slices not supported '
                                 'for compound models')
            if index.start is not None:
                if isinstance(index.start, str):
                    start = self._str_index_to_int(index.start)
                else:
                    start = index.start
            else:
                start = 0
            if index.stop is not None:
                if isinstance(index.stop, str):
                    stop = self._str_index_to_int(index.stop)
                else:
                    stop = index.stop - 1
            else:
                stop = len(leaflist) - 1
            if index.stop == 0:
                raise ValueError("Slice endpoint cannot be 0")
            if start < 0:
                start = len(leaflist) + start
            if stop < 0:
                stop = len(leaflist) + stop
            # now search for matching node:
            if stop == start:
                # only single value, get leaf instead in code below
                index = start
            else:
                for key in tdict:
                    node, leftind, rightind = tdict[key]
                    if leftind == start and rightind == stop:
                        return node
                raise IndexError("No appropriate subtree matches slice")
        if np.issubdtype(type(index), np.integer):
            return leaflist[index]
        elif isinstance(index, str):
            return leaflist[self._str_index_to_int(index)]
        else:
            raise TypeError('index must be integer, slice, or model name string')

    def _str_index_to_int(self, str_index):
        # Search through leaflist for item with that name
        found = []
        for nleaf, leaf in enumerate(self._leaflist):
            if getattr(leaf, 'name', None) == str_index:
                found.append(nleaf)
        if len(found) == 0:
            raise IndexError(f"No component with name '{str_index}' found")
        if len(found) > 1:
            raise IndexError("Multiple components found using '{}' as name\n"
                             "at indices {}".format(str_index, found))
        return found[0]

    @property
    def n_inputs(self):
        """ The number of inputs of a model."""
        return self._n_inputs

    @n_inputs.setter
    def n_inputs(self, value):
        self._n_inputs = value

    @property
    def n_outputs(self):
        """ The number of outputs of a model."""
        return self._n_outputs

    @n_outputs.setter
    def n_outputs(self, value):
        self._n_outputs = value

    @property
    def eqcons(self):
        return self._eqcons

    @eqcons.setter
    def eqcons(self, value):
        self._eqcons = value

    @property
    def ineqcons(self):
        return self._ineqcons

    @ineqcons.setter
    def ineqcons(self, value):
        self._ineqcons = value

    def traverse_postorder(self, include_operator=False):
        """ Postorder traversal of the CompoundModel tree."""
        res = []
        if isinstance(self.left, CompoundModel):
            res = res + self.left.traverse_postorder(include_operator)
        else:
            res = res + [self.left]
        if isinstance(self.right, CompoundModel):
            res = res + self.right.traverse_postorder(include_operator)
        else:
            res = res + [self.right]
        if include_operator:
            res.append(self.op)
        else:
            res.append(self)
        return res

    def _format_expression(self, format_leaf=None):
        leaf_idx = 0
        operands = deque()

        if format_leaf is None:
            format_leaf = lambda i, l: f'[{i}]'

        for node in self.traverse_postorder():
            if not isinstance(node, CompoundModel):
                operands.append(format_leaf(leaf_idx, node))
                leaf_idx += 1
                continue

            right = operands.pop()
            left = operands.pop()
            if node.op in OPERATOR_PRECEDENCE:
                oper_order = OPERATOR_PRECEDENCE[node.op]

                if isinstance(node, CompoundModel):
                    if (isinstance(node.left, CompoundModel) and
                            OPERATOR_PRECEDENCE[node.left.op] < oper_order):
                        left = f'({left})'
                    if (isinstance(node.right, CompoundModel) and
                            OPERATOR_PRECEDENCE[node.right.op] < oper_order):
                        right = f'({right})'

                operands.append(' '.join((left, node.op, right)))
            else:
                left =
f'(({left}),' right = f'({right}))' operands.append(' '.join((node.op[0], left, right))) return ''.join(operands) def _format_components(self): if self._parameters_ is None: self._map_parameters() return '\n\n'.join('[{0}]: {1!r}'.format(idx, m) for idx, m in enumerate(self._leaflist)) def __str__(self): expression = self._format_expression() components = self._format_components() keywords = [ ('Expression', expression), ('Components', '\n' + indent(components)) ] return super()._format_str(keywords=keywords) def rename(self, name): self.name = name return self @property def isleaf(self): return False @property def inverse(self): if self.op == '|': return self.right.inverse | self.left.inverse elif self.op == '&': return self.left.inverse & self.right.inverse else: return NotImplemented @property def fittable(self): """ Set the fittable attribute on a compound model.""" if self._fittable is None: if self._leaflist is None: self._map_parameters() self._fittable = all(m.fittable for m in self._leaflist) return self._fittable __add__ = _model_oper('+') __sub__ = _model_oper('-') __mul__ = _model_oper('*') __truediv__ = _model_oper('/') __pow__ = _model_oper('**') __or__ = _model_oper('|') __and__ = _model_oper('&') def _map_parameters(self): """ Map all the constituent model parameters to the compound object, renaming as necessary by appending a suffix number. This can be an expensive operation, particularly for a complex expression tree. All the corresponding parameter attributes are created that one expects for the Model class. The parameter objects that the attributes point to are the same objects as in the constiutent models. Changes made to parameter values to either are seen by both. Prior to calling this, none of the associated attributes will exist. This method must be called to make the model usable by fitting engines. If oldnames=True, then parameters are named as in the original implementation of compound models. """ if self._parameters is not None: # do nothing return if self._leaflist is None: self._make_leaflist() self._parameters_ = {} param_map = {} self._param_names = [] for lindex, leaf in enumerate(self._leaflist): if not isinstance(leaf, dict): for param_name in leaf.param_names: param = getattr(leaf, param_name) new_param_name = f"{param_name}_{lindex}" self.__dict__[new_param_name] = param self._parameters_[new_param_name] = param self._param_names.append(new_param_name) param_map[new_param_name] = (lindex, param_name) self._param_metrics = {} self._param_map = param_map self._param_map_inverse = dict((v, k) for k, v in param_map.items()) self._initialize_slices() self._param_names = tuple(self._param_names) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name] = {} param_metrics[name]['slice'] = param_slice param_metrics[name]['shape'] = param_shape param_metrics[name]['size'] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) @staticmethod def _recursive_lookup(branch, adict, key): if isinstance(branch, CompoundModel): return adict[key] return branch, key def inputs_map(self): """ Map the names of the inputs to this ExpressionTree to the inputs to the leaf models. 
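
        Examples
        --------
        An illustrative sketch (model reprs abbreviated, so not checked by
        doctest)::

            >>> from astropy.modeling.models import Shift, Scale
            >>> m = Shift(1) & Scale(2)
            >>> m.inputs_map()  # doctest: +SKIP
            {'x0': (<Shift(offset=1.)>, 'x'), 'x1': (<Scale(factor=2.)>, 'x')}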
""" inputs_map = {} if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial return {inp: (self, inp) for inp in self.inputs} elif self.op == '|': if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp elif self.op == '&': if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() if isinstance(self.right, CompoundModel): r_inputs_map = self.right.inputs_map() for i, inp in enumerate(self.inputs): if i < len(self.left.inputs): # Get from left if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[self.left.inputs[i]] else: inputs_map[inp] = self.left, self.left.inputs[i] else: # Get from right if isinstance(self.right, CompoundModel): inputs_map[inp] = r_inputs_map[self.right.inputs[i - len(self.left.inputs)]] else: inputs_map[inp] = self.right, self.right.inputs[i - len(self.left.inputs)] elif self.op == 'fix_inputs': fixed_ind = list(self.right.keys()) ind = [list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind] inp_ind = list(range(self.left.n_inputs)) for i in ind: inp_ind.remove(i) for i in inp_ind: inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i] else: if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.left.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp return inputs_map def _parameter_units_for_data_units(self, input_units, output_units): if self._leaflist is None: self._map_parameters() units_for_data = {} for imodel, model in enumerate(self._leaflist): units_for_data_leaf = model._parameter_units_for_data_units(input_units, output_units) for param_leaf in units_for_data_leaf: param = self._param_map_inverse[(imodel, param_leaf)] units_for_data[param] = units_for_data_leaf[param_leaf] return units_for_data @property def input_units(self): inputs_map = self.inputs_map() input_units_dict = {key: inputs_map[key][0].input_units[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units is not None} if input_units_dict: return input_units_dict return None @property def input_units_equivalencies(self): inputs_map = self.inputs_map() input_units_equivalencies_dict = { key: inputs_map[key][0].input_units_equivalencies[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units_equivalencies is not None } if not input_units_equivalencies_dict: return None return input_units_equivalencies_dict @property def input_units_allow_dimensionless(self): inputs_map = self.inputs_map() return {key: inputs_map[key][0].input_units_allow_dimensionless[orig_key] for key, (mod, orig_key) in inputs_map.items()} @property def input_units_strict(self): inputs_map = self.inputs_map() return {key: inputs_map[key][0].input_units_strict[orig_key] for key, (mod, orig_key) in inputs_map.items()} @property def return_units(self): outputs_map = self.outputs_map() return {key: outputs_map[key][0].return_units[orig_key] for key, (mod, orig_key) in outputs_map.items() if outputs_map[key][0].return_units is not None} def outputs_map(self): """ Map the names of the outputs to this ExpressionTree to the outputs to the leaf models. 
""" outputs_map = {} if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial return {out: (self, out) for out in self.outputs} elif self.op == '|': if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for out in self.outputs: if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[out] else: outputs_map[out] = self.right, out elif self.op == '&': if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for i, out in enumerate(self.outputs): if i < len(self.left.outputs): # Get from left if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map[self.left.outputs[i]] else: outputs_map[out] = self.left, self.left.outputs[i] else: # Get from right if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[self.right.outputs[i - len(self.left.outputs)]] else: outputs_map[out] = self.right, self.right.outputs[i - len(self.left.outputs)] elif self.op == 'fix_inputs': return self.left.outputs_map() else: if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() for out in self.left.outputs: if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map()[out] else: outputs_map[out] = self.left, out return outputs_map @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = self.get_bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError('If no bounding_box is set, ' 'coords or out must be input.') # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError('inconsistent shape of the output.') else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError('the array and model must have the same ' 'number of dimensions.') if bbox is not None: # Assures position is at center pixel, important when using # add_array. pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]).astype(int).T pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( 'The `bounding_box` is larger than the input out in ' 'one or more dimensions. Set ' '`model.bounding_box = None`.') else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out def replace_submodel(self, name, model): """ Construct a new `~astropy.modeling.CompoundModel` instance from an existing CompoundModel, replacing the named submodel with a new model. In order to ensure that inverses and names are kept/reconstructed, it's necessary to rebuild the CompoundModel from the replaced node all the way back to the base. The original CompoundModel is left untouched. Parameters ---------- name : str name of submodel to be replaced model : `~astropy.modeling.Model` replacement model """ submodels = [m for m in self.traverse_postorder() if getattr(m, 'name', None) == name] if submodels: if len(submodels) > 1: raise ValueError(f"More than one submodel named {name}") old_model = submodels.pop() if len(old_model) != len(model): raise ValueError("New and old models must have equal values " "for n_models") # Do this check first in order to raise a more helpful Exception, # although it would fail trying to construct the new CompoundModel if (old_model.n_inputs != model.n_inputs or old_model.n_outputs != model.n_outputs): raise ValueError("New model must match numbers of inputs and " "outputs of existing model") tree = _get_submodel_path(self, name) while tree: branch = self.copy() for node in tree[:-1]: branch = getattr(branch, node) setattr(branch, tree[-1], model) model = CompoundModel(branch.op, branch.left, branch.right, name=branch.name) tree = tree[:-1] return model else: raise ValueError(f"No submodels found named {name}") def _set_sub_models_and_parameter_units(self, left, right): """ Provides a work-around to properly set the sub models and respective parameters's units/values when using ``without_units_for_data`` or ``without_units_for_data`` methods. 
""" model = CompoundModel(self.op, left, right) self.left = left self.right = right for name in model.param_names: model_parameter = getattr(model, name) parameter = getattr(self, name) parameter.value = model_parameter.value parameter._set_unit(model_parameter.unit, force=True) def without_units_for_data(self, **kwargs): """ See `~astropy.modeling.Model.without_units_for_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. It does this by modifying the output units of each sub model by using the output units of the other sub model so that we can apply the original function and get the desired result. Additional data has to be output in the mixed output unit case so that the units can be properly rebuilt by `~astropy.modeling.CompoundModel.with_units_from_data`. Outside the mixed output units, this method is identical to the base method. """ if self.op in ['*', '/']: model = self.copy() inputs = {inp: kwargs[inp] for inp in self.inputs} left_units = self.left.output_units(**kwargs) right_units = self.right.output_units(**kwargs) if self.op == '*': left_kwargs = {out: kwargs[out] / right_units[out] for out in self.left.outputs if kwargs[out] is not None} right_kwargs = {out: kwargs[out] / left_units[out] for out in self.right.outputs if kwargs[out] is not None} else: left_kwargs = {out: kwargs[out] * right_units[out] for out in self.left.outputs if kwargs[out] is not None} right_kwargs = {out: 1 / kwargs[out] * left_units[out] for out in self.right.outputs if kwargs[out] is not None} left_kwargs.update(inputs.copy()) right_kwargs.update(inputs.copy()) left = self.left.without_units_for_data(**left_kwargs) if isinstance(left, tuple): left_kwargs['_left_kwargs'] = left[1] left_kwargs['_right_kwargs'] = left[2] left = left[0] right = self.right.without_units_for_data(**right_kwargs) if isinstance(right, tuple): right_kwargs['_left_kwargs'] = right[1] right_kwargs['_right_kwargs'] = right[2] right = right[0] model._set_sub_models_and_parameter_units(left, right) return model, left_kwargs, right_kwargs else: return super().without_units_for_data(**kwargs) def with_units_from_data(self, **kwargs): """ See `~astropy.modeling.Model.with_units_from_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. In order to do this it requires some additional information output by `~astropy.modeling.CompoundModel.without_units_for_data` passed as keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``. Outside the mixed output units, this method is identical to the base method. 
""" if self.op in ['*', '/']: left_kwargs = kwargs.pop('_left_kwargs') right_kwargs = kwargs.pop('_right_kwargs') left = self.left.with_units_from_data(**left_kwargs) right = self.right.with_units_from_data(**right_kwargs) model = self.copy() model._set_sub_models_and_parameter_units(left, right) return model else: return super().with_units_from_data(**kwargs) def _get_submodel_path(model, name): """Find the route down a CompoundModel's tree to the model with the specified name (whether it's a leaf or not)""" if getattr(model, 'name', None) == name: return [] try: return ['left'] + _get_submodel_path(model.left, name) except (AttributeError, TypeError): pass try: return ['right'] + _get_submodel_path(model.right, name) except (AttributeError, TypeError): pass def binary_operation(binoperator, left, right): ''' Perform binary operation. Operands may be matching tuples of operands. ''' if isinstance(left, tuple) and isinstance(right, tuple): return tuple([binoperator(item[0], item[1]) for item in zip(left, right)]) return binoperator(left, right) def get_ops(tree, opset): """ Recursive function to collect operators used. """ if isinstance(tree, CompoundModel): opset.add(tree.op) get_ops(tree.left, opset) get_ops(tree.right, opset) else: return def make_subtree_dict(tree, nodepath, tdict, leaflist): ''' Traverse a tree noting each node by a key that indicates all the left/right choices necessary to reach that node. Each key will reference a tuple that contains: - reference to the compound model for that node. - left most index contained within that subtree (relative to all indices for the whole tree) - right most index contained within that subtree ''' # if this is a leaf, just append it to the leaflist if not hasattr(tree, 'isleaf'): leaflist.append(tree) else: leftmostind = len(leaflist) make_subtree_dict(tree.left, nodepath+'l', tdict, leaflist) make_subtree_dict(tree.right, nodepath+'r', tdict, leaflist) rightmostind = len(leaflist)-1 tdict[nodepath] = (tree, leftmostind, rightmostind) _ORDER_OF_OPERATORS = [('fix_inputs',), ('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)] OPERATOR_PRECEDENCE = {} for idx, ops in enumerate(_ORDER_OF_OPERATORS): for op in ops: OPERATOR_PRECEDENCE[op] = idx del idx, op, ops def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None): """ This function creates a compound model with one or more of the input values of the input model assigned fixed values (scalar or array). Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that one or more of the model input values will be fixed to some constant value. values : dict A dictionary where the key identifies which input to fix and its value is the value to fix it at. The key may either be the name of the input or a number reflecting its order in the inputs. 
Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> g = Gaussian2D(1, 2, 3, 4, 5) >>> gv = fix_inputs(g, {0: 2.5}) Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) """ model = CompoundModel('fix_inputs', modelinstance, values) if bounding_boxes is not None: if selector_args is None: selector_args = tuple([(key, True) for key in values.keys()]) bbox = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args) _selector = bbox.selector_args.get_fixed_values(modelinstance, values) new_bbox = bbox[_selector] new_bbox = new_bbox.__class__.validate(model, new_bbox) model.bounding_box = new_bbox return model def bind_bounding_box(modelinstance, bounding_box, ignored=None, order='C'): """ Set a validated bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated bounding box will be set on. bounding_box : tuple A bounding box tuple, see :ref:`astropy:bounding-boxes` for details ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = ModelBoundingBox.validate(modelinstance, bounding_box, ignored=ignored, order=order) def bind_compound_bounding_box(modelinstance, bounding_boxes, selector_args, create_selector=None, ignored=None, order='C'): """ Add a validated compound bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated compound bounding box will be set on. bounding_boxes : dict A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes` for details. selector_args : list List of selector argument tuples to define selection for compound bounding box, see :ref:`astropy:bounding-boxes` for details. create_selector : callable, optional An optional callable with interface (selector_value, model) which can generate a bounding box based on a selector value and model if there is no bounding box in the compound bounding box listed under that selector value. Default is ``None``, meaning new bounding box entries will not be automatically generated. ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args, create_selector=create_selector, ignored=ignored, order=order) def custom_model(*args, fit_deriv=None): """ Create a model from a user defined function. The inputs and parameters of the model will be inferred from the arguments of the function. This can be used either as a function or as a decorator. See below for examples of both usages. The model is separable only if there is a single input. .. note:: All model parameters have to be defined as keyword arguments with default values in the model function. Use `None` as a default argument value if you do not want to have a default value for that parameter. The standard settable model properties can be configured by default using keyword arguments matching the name of the property; however, these values are not set as model "parameters". Moreover, users cannot use keyword arguments matching non-settable model properties, with the exception of ``n_outputs`` which should be set to the number of outputs of your function. 
Parameters ---------- func : function Function which defines the model. It should take N positional arguments where ``N`` is dimensions of the model (the number of independent variable in the model), and any number of keyword arguments (the parameters). It must return the value of the model (typically as an array, but can also be a scalar for scalar inputs). This corresponds to the `~astropy.modeling.Model.evaluate` method. fit_deriv : function, optional Function which defines the Jacobian derivative of the model. I.e., the derivative with respect to the *parameters* of the model. It should have the same argument signature as ``func``, but should return a sequence where each element of the sequence is the derivative with respect to the corresponding argument. This corresponds to the :meth:`~astropy.modeling.FittableModel.fit_deriv` method. Examples -------- Define a sinusoidal model function as a custom 1D model:: >>> from astropy.modeling.models import custom_model >>> import numpy as np >>> def sine_model(x, amplitude=1., frequency=1.): ... return amplitude * np.sin(2 * np.pi * frequency * x) >>> def sine_deriv(x, amplitude=1., frequency=1.): ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x) >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv) Create an instance of the custom model and evaluate it:: >>> model = SineModel() >>> model(0.25) 1.0 This model instance can now be used like a usual astropy model. The next example demonstrates a 2D Moffat function model, and also demonstrates the support for docstrings (this example could also include a derivative, but it has been omitted for simplicity):: >>> @custom_model ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0, ... alpha=1.0): ... \"\"\"Two dimensional Moffat function.\"\"\" ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 ... return amplitude * (1 + rr_gg) ** (-alpha) ... >>> print(Moffat2D.__doc__) Two dimensional Moffat function. >>> model = Moffat2D() >>> model(1, 1) # doctest: +FLOAT_CMP 0.3333333333333333 """ if len(args) == 1 and callable(args[0]): return _custom_model_wrapper(args[0], fit_deriv=fit_deriv) elif not args: return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv) else: raise TypeError( "{0} takes at most one positional argument (the callable/" "function to be turned into a model. When used as a decorator " "it should be passed keyword arguments only (if " "any).".format(__name__)) def _custom_model_inputs(func): """ Processes the inputs to the `custom_model`'s function into the appropriate categories. 
Parameters ---------- func : callable Returns ------- inputs : list list of evaluation inputs special_params : dict dictionary of model properties which require special treatment settable_params : dict dictionary of defaults for settable model properties params : dict dictionary of model parameters set by `custom_model`'s function """ inputs, parameters = get_inputs_and_params(func) special = ['n_outputs'] settable = [attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is not None] properties = [attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is None and attr not in special] special_params = {} settable_params = {} params = {} for param in parameters: if param.name in special: special_params[param.name] = param.default elif param.name in settable: settable_params[param.name] = param.default elif param.name in properties: raise ValueError(f"Parameter '{param.name}' cannot be a model property: {properties}.") else: params[param.name] = param.default return inputs, special_params, settable_params, params def _custom_model_wrapper(func, fit_deriv=None): """ Internal implementation `custom_model`. When `custom_model` is called as a function its arguments are passed to this function, and the result of this function is returned. When `custom_model` is used as a decorator a partial evaluation of this function is returned by `custom_model`. """ if not callable(func): raise ModelDefinitionError( "func is not callable; it must be a function or other callable " "object") if fit_deriv is not None and not callable(fit_deriv): raise ModelDefinitionError( "fit_deriv not callable; it must be a function or other " "callable object") model_name = func.__name__ inputs, special_params, settable_params, params = _custom_model_inputs(func) if (fit_deriv is not None and len(fit_deriv.__defaults__) != len(params)): raise ModelDefinitionError("derivative function should accept " "same number of parameters as func.") params = {param: Parameter(param, default=default) for param, default in params.items()} mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = '__main__' members = { '__module__': str(modname), '__doc__': func.__doc__, 'n_inputs': len(inputs), 'n_outputs': special_params.pop('n_outputs', 1), 'evaluate': staticmethod(func), '_settable_properties': settable_params } if fit_deriv is not None: members['fit_deriv'] = staticmethod(fit_deriv) members.update(params) cls = type(model_name, (FittableModel,), members) cls._separable = True if (len(inputs) == 1) else False return cls def render_model(model, arr=None, coords=None): """ Evaluates a model on an input array. Evaluation is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- model : `Model` Model to be evaluated. arr : `numpy.ndarray`, optional Array on which the model is evaluated. coords : array-like, optional Coordinate arrays mapping to ``arr``, such that ``arr[coords] == arr``. Returns ------- array : `numpy.ndarray` The model evaluated on the input ``arr`` or a new array from ``coords``. If ``arr`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. 
    Examples
    --------
    :ref:`astropy:bounding-boxes`
    """

    bbox = model.bounding_box

    if (coords is None) and (arr is None) and (bbox is None):
        raise ValueError('If no bounding_box is set, '
                         'coords or arr must be input.')

    # for consistent indexing
    if model.n_inputs == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]

    if arr is not None:
        arr = arr.copy()
        # Check dimensions match model
        if arr.ndim != model.n_inputs:
            raise ValueError('number of array dimensions inconsistent with '
                             'number of model inputs.')
    if coords is not None:
        # Check dimensions match arr and model
        coords = np.array(coords)
        if len(coords) != model.n_inputs:
            raise ValueError('coordinate length inconsistent with the number '
                             'of model inputs.')
        if arr is not None:
            if coords[0].shape != arr.shape:
                raise ValueError('coordinate shape inconsistent with the '
                                 'array shape.')
        else:
            arr = np.zeros(coords[0].shape)

    if bbox is not None:
        # assures position is at center pixel, important when using add_array
        pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
                                    for bb in bbox]).astype(int).T

        if coords is not None:
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array([extract_array(c, sub_shape, pos)
                                   for c in coords])
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]

        sub_coords = sub_coords[::-1]

        if arr is None:
            arr = model(*sub_coords)
        else:
            try:
                arr = add_array(arr, model(*sub_coords), pos)
            except ValueError:
                raise ValueError('The `bounding_box` is larger than the input'
                                 ' arr in one or more dimensions. Set '
                                 '`model.bounding_box = None`.')
    else:
        if coords is None:
            im_shape = arr.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]

        arr += model(*coords[::-1])

    return arr


def hide_inverse(model):
    """
    This is a convenience function intended to disable automatic generation
    of the inverse in compound models by disabling one of the constituent
    model's inverse. This is to handle cases where user provided inverse
    functions are not compatible within an expression.

    Example:
        compound_model.inverse = hide_inverse(m1) + m2 + m3

    This will ensure that the defined inverse itself won't attempt to build
    its own inverse, which would otherwise fail in this example (e.g.,
    m = m1 + m2 + m3 happens to raise an exception for this reason.)

    Note that this permanently disables it. To prevent that, either copy the
    model or restore the inverse later.
    """
    del model.inverse
    return model
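
# A minimal usage sketch of ``fix_inputs`` and ``custom_model`` defined above;
# Gaussian2D is the real class from astropy.modeling.models, and all numeric
# values here are arbitrary. Guarded so the module itself stays side-effect free.
if __name__ == '__main__':
    from astropy.modeling.models import Gaussian2D

    g = Gaussian2D(1, 2, 3, 4, 5)
    # Freeze the first input (x) at 2.5; the result evaluates as a 1D model in y.
    g_fixed = fix_inputs(g, {0: 2.5})
    print(g_fixed(3.0))  # equivalent to g(2.5, 3.0)

    # Build a fittable model class from a plain function via custom_model.
    @custom_model
    def line(x, slope=1.0, intercept=0.0):
        return slope * x + intercept

    print(line(slope=2.0)(1.5))  # 3.0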
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Convolution Model"""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np

from .core import SPECIAL_OPERATORS, CompoundModel


class Convolution(CompoundModel):
    """
    Wrapper class for a convolution model.

    Parameters
    ----------
    operator : tuple
        The SPECIAL_OPERATORS entry for the convolution being used.
    model : Model
        The model for the convolution.
    kernel : Model
        The kernel model for the convolution.
    bounding_box : tuple
        A bounding box to define the limits of the integration
        approximation for the convolution.
    resolution : float
        The resolution for the approximation of the convolution.
    cache : bool, optional
        Allow convolution computation to be cached for reuse. This is
        enabled by default.

    Notes
    -----
    This wrapper is necessary to handle the limitations of the
    pseudospectral convolution binary operator implemented in
    astropy.convolution under `~astropy.convolution.convolve_fft`. In
    `~astropy.convolution.convolve_fft` it is assumed that the inputs
    ``array`` and ``kernel`` span a sufficient portion of the support of the
    functions of the convolution. Consequently, the compound model created
    by the `~astropy.convolution.convolve_models` function makes the
    assumption that one should pass an input array that sufficiently spans
    this space. This means that slightly different input arrays to this
    model will result in different outputs, even on points of intersection
    between these arrays.

    This issue is solved by requiring a ``bounding_box`` together with a
    resolution so that one can pre-calculate the entire domain and then
    (by default) cache the convolution values. The function then just
    interpolates the results from this cache.
    """

    def __init__(self, operator, model, kernel, bounding_box, resolution, cache=True):
        super().__init__(operator, model, kernel)

        self.bounding_box = bounding_box
        self._resolution = resolution

        self._cache_convolution = cache
        self._kwargs = None
        self._convolution = None

    def clear_cache(self):
        """
        Clears the cached convolution
        """
        self._kwargs = None
        self._convolution = None

    def _get_convolution(self, **kwargs):
        if (self._convolution is None) or (self._kwargs != kwargs):
            domain = self.bounding_box.domain(self._resolution)
            mesh = np.meshgrid(*domain)
            data = super().__call__(*mesh, **kwargs)

            from scipy.interpolate import RegularGridInterpolator
            convolution = RegularGridInterpolator(domain, data)

            if self._cache_convolution:
                self._kwargs = kwargs
                self._convolution = convolution
        else:
            convolution = self._convolution

        return convolution

    @staticmethod
    def _convolution_inputs(*args):
        not_scalar = np.where([not np.isscalar(arg) for arg in args])[0]

        if len(not_scalar) == 0:
            return np.array(args), (1,)
        else:
            output_shape = args[not_scalar[0]].shape
            if not all(args[index].shape == output_shape for index in not_scalar):
                raise ValueError('Values have differing shapes')

            inputs = []
            for arg in args:
                if np.isscalar(arg):
                    inputs.append(np.full(output_shape, arg))
                else:
                    inputs.append(arg)

            return np.reshape(inputs, (len(inputs), -1)).T, output_shape

    @staticmethod
    def _convolution_outputs(outputs, output_shape):
        return outputs.reshape(output_shape)

    def __call__(self, *args, **kw):
        inputs, output_shape = self._convolution_inputs(*args)
        convolution = self._get_convolution(**kw)
        outputs = convolution(inputs)

        return self._convolution_outputs(outputs, output_shape)
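
# A short sketch of how this wrapper is typically obtained, assuming the
# ``convolve_models_fft`` helper in ``astropy.convolution`` constructs a
# ``Convolution`` instance (an assumption about the caller, not part of this
# module); requires scipy, and all numeric values are arbitrary.
if __name__ == '__main__':
    from astropy.convolution import convolve_models_fft
    from astropy.modeling.models import Gaussian1D

    model = Gaussian1D(amplitude=1, mean=0, stddev=1)
    kernel = Gaussian1D(amplitude=1, mean=0, stddev=0.5)
    # Pre-compute the convolution over the bounding box (-5, 5) at a
    # resolution of 0.01; later calls interpolate from the cached grid.
    conv = convolve_models_fft(model, kernel, (-5, 5), 0.01)
    print(conv(0.0))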
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage provides a framework for representing models and performing model evaluation and fitting. It supports 1D and 2D models and fitting with parameter constraints. It has some predefined models and fitting routines. """ from . import fitting, models from .core import * from .parameters import * from .separable import *
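
# A brief end-to-end sketch of the workflow this subpackage provides
# (model definition plus fitting); the data values are arbitrary.
if __name__ == '__main__':
    import numpy as np
    from astropy.modeling import fitting, models

    rng = np.random.default_rng(0)
    x = np.linspace(-5, 5, 200)
    y = 3 * np.exp(-0.5 * (x - 1.2) ** 2 / 0.9 ** 2) + rng.normal(0, 0.05, x.size)

    # Fit a Gaussian1D model to the noisy data with least squares.
    fitter = fitting.LevMarLSQFitter()
    best = fitter(models.Gaussian1D(amplitude=1, mean=0, stddev=1), x, y)
    print(best.amplitude.value, best.mean.value, best.stddev.value)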
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Creates a common namespace for all pre-defined models.
"""
# pylint: disable=unused-wildcard-import, unused-import, wildcard-import

from . import math_functions as math
from .core import custom_model, fix_inputs, hide_inverse  # pylint: disable=W0611
from .functional_models import *
from .mappings import *
from .physical_models import *
from .polynomial import *
from .powerlaws import *
from .projections import *
from .rotations import *
from .spline import *
from .tabular import *

# Attach a docstring explaining constraints to all models which support them.
# Note: add new models to this list

CONSTRAINTS_DOC = """
    Other Parameters
    ----------------
    fixed : dict, optional
        A dictionary ``{parameter_name: boolean}`` of parameters to not be
        varied during fitting. True means the parameter is held fixed.
        Alternatively the `~astropy.modeling.Parameter.fixed`
        property of a parameter may be used.
    tied : dict, optional
        A dictionary ``{parameter_name: callable}`` of parameters which are
        linked to some other parameter. The dictionary values are callables
        providing the linking relationship. Alternatively the
        `~astropy.modeling.Parameter.tied` property of a parameter
        may be used.
    bounds : dict, optional
        A dictionary ``{parameter_name: value}`` of lower and upper bounds of
        parameters. Keys are parameter names. Values are a list or a tuple
        of length 2 giving the desired range for the parameter.
        Alternatively, the `~astropy.modeling.Parameter.min` and
        `~astropy.modeling.Parameter.max` properties of a parameter
        may be used.
    eqcons : list, optional
        A list of functions of length ``n`` such that
        ``eqcons[j](x0,*args) == 0.0`` in a successfully optimized problem.
    ineqcons : list, optional
        A list of functions of length ``n`` such that
        ``ieqcons[j](x0,*args) >= 0.0`` in a successfully optimized problem.
"""


MODELS_WITH_CONSTRAINTS = [
    AiryDisk2D, Moffat1D, Moffat2D, Box1D, Box2D,
    Const1D, Const2D, Ellipse2D, Disk2D,
    Gaussian1D, Gaussian2D,
    Linear1D, Lorentz1D,
    RickerWavelet1D, RickerWavelet2D,
    PowerLaw1D, Sersic1D, Sersic2D, Sine1D, Cosine1D, Tangent1D,
    ArcSine1D, ArcCosine1D, ArcTangent1D,
    Trapezoid1D, TrapezoidDisk2D,
    Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre2D, Legendre1D,
    Polynomial1D, Polynomial2D, Voigt1D, KingProjectedAnalytic1D, NFW
]


for item in MODELS_WITH_CONSTRAINTS:
    if isinstance(item.__doc__, str):
        item.__doc__ += CONSTRAINTS_DOC
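
# A compact sketch of the constraint keywords documented in CONSTRAINTS_DOC,
# applied to Gaussian1D (one of MODELS_WITH_CONSTRAINTS); values are arbitrary.
if __name__ == '__main__':
    g = Gaussian1D(amplitude=2, mean=0, stddev=1,
                   fixed={'mean': True},           # hold ``mean`` constant during fitting
                   bounds={'stddev': (0.5, 2.0)})  # constrain ``stddev`` to [0.5, 2.0]
    print(g.mean.fixed, g.stddev.bounds)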
""" Special models useful for complex compound models where control is needed over which outputs from a source model are mapped to which inputs of a target model. """ # pylint: disable=invalid-name from astropy.units import Quantity from .core import FittableModel, Model __all__ = ['Mapping', 'Identity', 'UnitsMapping'] class Mapping(FittableModel): """ Allows inputs to be reordered, duplicated or dropped. Parameters ---------- mapping : tuple A tuple of integers representing indices of the inputs to this model to return and in what order to return them. See :ref:`astropy:compound-model-mappings` for more details. n_inputs : int Number of inputs; if `None` (default) then ``max(mapping) + 1`` is used (i.e. the highest input index used in the mapping). name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Raises ------ TypeError Raised when number of inputs is less that ``max(mapping)``. Examples -------- >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1) >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2) >>> model(1, 2) # doctest: +FLOAT_CMP (17.0, 14.2) """ linear = True # FittableModel is non-linear by default def __init__(self, mapping, n_inputs=None, name=None, meta=None): self._inputs = () self._outputs = () if n_inputs is None: self._n_inputs = max(mapping) + 1 else: self._n_inputs = n_inputs self._n_outputs = len(mapping) super().__init__(name=name, meta=meta) self.inputs = tuple('x' + str(idx) for idx in range(self._n_inputs)) self.outputs = tuple('x' + str(idx) for idx in range(self._n_outputs)) self._mapping = mapping self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs @property def mapping(self): """Integers representing indices of the inputs.""" return self._mapping def __repr__(self): if self.name is None: return f'<Mapping({self.mapping})>' return f'<Mapping({self.mapping}, name={self.name!r})>' def evaluate(self, *args): if len(args) != self.n_inputs: name = self.name if self.name is not None else "Mapping" raise TypeError(f'{name} expects {self.n_inputs} inputs; got {len(args)}') result = tuple(args[idx] for idx in self._mapping) if self.n_outputs == 1: return result[0] return result @property def inverse(self): """ A `Mapping` representing the inverse of the current mapping. Raises ------ `NotImplementedError` An inverse does no exist on mappings that drop some of its inputs (there is then no way to reconstruct the inputs that were dropped). """ try: mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs)) except ValueError: raise NotImplementedError( "Mappings such as {} that drop one or more of their inputs " "are not invertible at this time.".format(self.mapping)) inv = self.__class__(mapping) inv._inputs = self._outputs inv._outputs = self._inputs inv._n_inputs = len(inv._inputs) inv._n_outputs = len(inv._outputs) return inv class Identity(Mapping): """ Returns inputs unchanged. This class is useful in compound models when some of the inputs must be passed unchanged to the next model. 
Parameters ---------- n_inputs : int Specifies the number of inputs this identity model accepts. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Examples -------- Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs:: >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale, ... Identity) >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2) >>> model(1,1) # doctest: +FLOAT_CMP (2.4, 2.0) >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP (1.0, 1.0) """ linear = True # FittableModel is non-linear by default def __init__(self, n_inputs, name=None, meta=None): mapping = tuple(range(n_inputs)) super().__init__(mapping, name=name, meta=meta) def __repr__(self): if self.name is None: return f'<Identity({self.n_inputs})>' return f'<Identity({self.n_inputs}, name={self.name!r})>' @property def inverse(self): """ The inverse transformation. In this case of `Identity`, ``self.inverse is self``. """ return self class UnitsMapping(Model): """ Mapper that operates on the units of the input, first converting to canonical units, then assigning new units without further conversion. Used by Model.coerce_units to support units on otherwise unitless models such as Polynomial1D. Parameters ---------- mapping : tuple A tuple of (input_unit, output_unit) pairs, one per input, matched to the inputs by position. The first element of the each pair is the unit that the model will accept (specify ``dimensionless_unscaled`` to accept dimensionless input). The second element is the unit that the model will return. Specify ``dimensionless_unscaled`` to return dimensionless Quantity, and `None` to return raw values without Quantity. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : dict or bool, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like, optional Free-form metadata to associate with this model. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),)) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ def __init__( self, mapping, input_units_equivalencies=None, input_units_allow_dimensionless=False, name=None, meta=None ): self._mapping = mapping none_mapping_count = len([m for m in mapping if m[-1] is None]) if none_mapping_count > 0 and none_mapping_count != len(mapping): raise ValueError("If one return unit is None, then all must be None") # These attributes are read and handled by Model self._input_units_strict = True self.input_units_equivalencies = input_units_equivalencies self._input_units_allow_dimensionless = input_units_allow_dimensionless super().__init__(name=name, meta=meta) # Can't invoke this until after super().__init__, since # we need self.inputs and self.outputs to be populated. self._rebuild_units() def _rebuild_units(self): self._input_units = {input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping)} @property def n_inputs(self): return len(self._mapping) @property def n_outputs(self): return len(self._mapping) @property def inputs(self): return super().inputs @inputs.setter def inputs(self, value): super(UnitsMapping, self.__class__).inputs.fset(self, value) self._rebuild_units() @property def outputs(self): return super().outputs @outputs.setter def outputs(self, value): super(UnitsMapping, self.__class__).outputs.fset(self, value) self._rebuild_units() @property def input_units(self): return self._input_units @property def mapping(self): return self._mapping def evaluate(self, *args): result = [] for arg, (_, return_unit) in zip(args, self.mapping): if isinstance(arg, Quantity): value = arg.value else: value = arg if return_unit is None: result.append(value) else: result.append(Quantity(value, return_unit, subok=True)) if self.n_outputs == 1: return result[0] else: return tuple(result) def __repr__(self): if self.name is None: return f"<UnitsMapping({self.mapping})>" else: return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
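
# A minimal sketch of Mapping and Identity in compound expressions, mirroring
# the class docstrings above; the shift amounts are arbitrary.
if __name__ == '__main__':
    from astropy.modeling.models import Shift

    # Duplicate one input so it feeds two parallel shifts.
    model = Mapping((0, 0)) | (Shift(1) & Shift(2))
    print(model(5))  # (6.0, 7.0)

    # Identity passes an input through unchanged alongside a shift.
    model2 = Shift(1) & Identity(1)
    print(model2(1, 1))  # (2.0, 1.0)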
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Spline models and fitters.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import abc import functools import warnings import numpy as np from astropy.utils import isiterable from astropy.utils.exceptions import AstropyUserWarning from .core import FittableModel, ModelDefinitionError from .parameters import Parameter __all__ = ['Spline1D', 'SplineInterpolateFitter', 'SplineSmoothingFitter', 'SplineExactKnotsFitter', 'SplineSplrepFitter'] __doctest_requires__ = {('Spline1D'): ['scipy']} class _Spline(FittableModel): """Base class for spline models""" _knot_names = () _coeff_names = () optional_inputs = {} def __init__(self, knots=None, coeffs=None, degree=None, bounds=None, n_models=None, model_set_axis=None, name=None, meta=None): super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta) self._user_knots = False self._init_tck(degree) # Hack to allow an optional model argument self._create_optional_inputs() if knots is not None: self._init_spline(knots, coeffs, bounds) elif coeffs is not None: raise ValueError("If one passes a coeffs vector one needs to also pass knots!") @property def param_names(self): """ Coefficient names generated based on the spline's degree and number of knots. """ return tuple(list(self._knot_names) + list(self._coeff_names)) @staticmethod def _optional_arg(arg): return f'_{arg}' def _create_optional_inputs(self): for arg in self.optional_inputs: attribute = self._optional_arg(arg) if hasattr(self, attribute): raise ValueError(f'Optional argument {arg} already exists in this class!') else: setattr(self, attribute, None) def _intercept_optional_inputs(self, **kwargs): new_kwargs = kwargs for arg in self.optional_inputs: if (arg in kwargs): attribute = self._optional_arg(arg) if getattr(self, attribute) is None: setattr(self, attribute, kwargs[arg]) del new_kwargs[arg] else: raise RuntimeError(f'{arg} has already been set, something has gone wrong!') return new_kwargs def evaluate(self, *args, **kwargs): """ Extract the optional kwargs passed to call """ optional_inputs = kwargs for arg in self.optional_inputs: attribute = self._optional_arg(arg) if arg in kwargs: # Options passed in optional_inputs[arg] = kwargs[arg] elif getattr(self, attribute) is not None: # No options passed in and Options set optional_inputs[arg] = getattr(self, attribute) setattr(self, attribute, None) else: # No options passed in and No options set optional_inputs[arg] = self.optional_inputs[arg] return optional_inputs def __call__(self, *args, **kwargs): """ Make model callable to model evaluation """ # Hack to allow an optional model argument kwargs = self._intercept_optional_inputs(**kwargs) return super().__call__(*args, **kwargs) def _create_parameter(self, name: str, index: int, attr: str, fixed=False): """ Create a spline parameter linked to an attribute array. 
Parameters ---------- name : str Name for the parameter index : int The index of the parameter in the array attr : str The name for the attribute array fixed : optional, bool If the parameter should be fixed or not """ # Hack to allow parameters and attribute array to freely exchange values # _getter forces reading value from attribute array # _setter forces setting value to attribute array def _getter(value, model: "_Spline", index: int, attr: str): return getattr(model, attr)[index] def _setter(value, model: "_Spline", index: int, attr: str): getattr(model, attr)[index] = value return value getter = functools.partial(_getter, index=index, attr=attr) setter = functools.partial(_setter, index=index, attr=attr) default = getattr(self, attr) param = Parameter(name=name, default=default[index], fixed=fixed, getter=getter, setter=setter) # setter/getter wrapper for parameters in this case require the # parameter to have a reference back to its parent model param.model = self param.value = default[index] # Add parameter to model self.__dict__[name] = param def _create_parameters(self, base_name: str, attr: str, fixed=False): """ Create a spline parameters linked to an attribute array for all elements in that array Parameters ---------- base_name : str Base name for the parameters attr : str The name for the attribute array fixed : optional, bool If the parameters should be fixed or not """ names = [] for index in range(len(getattr(self, attr))): name = f"{base_name}{index}" names.append(name) self._create_parameter(name, index, attr, fixed) return tuple(names) @abc.abstractmethod def _init_parameters(self): raise NotImplementedError("This needs to be implemented") @abc.abstractmethod def _init_data(self, knots, coeffs, bounds=None): raise NotImplementedError("This needs to be implemented") def _init_spline(self, knots, coeffs, bounds=None): self._init_data(knots, coeffs, bounds) self._init_parameters() # fill _parameters and related attributes self._initialize_parameters((), {}) self._initialize_slices() # Calling this will properly fill the _parameter vector, which is # used directly sometimes without being properly filled. _ = self.parameters def _init_tck(self, degree): self._c = None self._t = None self._degree = degree class Spline1D(_Spline): """ One dimensional Spline Model Parameters ---------- knots : optional Define the knots for the spline. Can be 1) the number of interior knots for the spline, 2) the array of all knots for the spline, or 3) If both bounds are defined, the interior knots for the spline coeffs : optional The array of knot coefficients for the spline degree : optional The degree of the spline. It must be 1 <= degree <= 5, default is 3. bounds : optional The upper and lower bounds of the spline. Notes ----- Much of the functionality of this model is provided by `scipy.interpolate.BSpline` which can be directly accessed via the bspline property. Fitting for this model is provided by wrappers for: `scipy.interpolate.UnivariateSpline`, `scipy.interpolate.InterpolatedUnivariateSpline`, and `scipy.interpolate.LSQUnivariateSpline`. If one fails to define any knots/coefficients, no parameters will be added to this model until a fitter is called. This is because some of the fitters for splines vary the number of parameters and so we cannot define the parameter set until after fitting in these cases. Since parameters are not necessarily known at model initialization, setting model parameters directly via the model interface has been disabled. 
Direct constructors are provided for this model which incorporate the fitting to data directly into model construction. Knot parameters are declared as "fixed" parameters by default to enable the use of other `astropy.modeling` fitters to be used to fit this model. Examples -------- >>> import numpy as np >>> from astropy.modeling.models import Spline1D >>> from astropy.modeling import fitting >>> np.random.seed(42) >>> x = np.linspace(-3, 3, 50) >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50) >>> xs = np.linspace(-3, 3, 1000) A 1D interpolating spline can be fit to data: >>> fitter = fitting.SplineInterpolateFitter() >>> spl = fitter(Spline1D(), x, y) Similarly, a smoothing spline can be fit to data: >>> fitter = fitting.SplineSmoothingFitter() >>> spl = fitter(Spline1D(), x, y, s=0.5) Similarly, a spline can be fit to data using an exact set of interior knots: >>> t = [-1, 0, 1] >>> fitter = fitting.SplineExactKnotsFitter() >>> spl = fitter(Spline1D(), x, y, t=t) """ n_inputs = 1 n_outputs = 1 _separable = True optional_inputs = {'nu': 0} def __init__(self, knots=None, coeffs=None, degree=3, bounds=None, n_models=None, model_set_axis=None, name=None, meta=None): super().__init__( knots=knots, coeffs=coeffs, degree=degree, bounds=bounds, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) @property def t(self): """ The knots vector """ if self._t is None: return np.concatenate((np.zeros(self._degree + 1), np.ones(self._degree + 1))) else: return self._t @t.setter def t(self, value): if self._t is None: raise ValueError("The model parameters must be initialized before setting knots.") elif len(value) == len(self._t): self._t = value else: raise ValueError("There must be exactly as many knots as previously defined.") @property def t_interior(self): """ The interior knots """ return self.t[self.degree + 1: -(self.degree + 1)] @property def c(self): """ The coefficients vector """ if self._c is None: return np.zeros(len(self.t)) else: return self._c @c.setter def c(self, value): if self._c is None: raise ValueError("The model parameters must be initialized before setting coeffs.") elif len(value) == len(self._c): self._c = value else: raise ValueError("There must be exactly as many coeffs as previously defined.") @property def degree(self): """ The degree of the spline polynomials """ return self._degree @property def _initialized(self): return self._t is not None and self._c is not None @property def tck(self): """ Scipy 'tck' tuple representation """ return (self.t, self.c, self.degree) @tck.setter def tck(self, value): if self._initialized: if value[2] != self.degree: raise ValueError("tck has incompatible degree!") self.t = value[0] self.c = value[1] else: self._init_spline(value[0], value[1]) # Calling this will properly fill the _parameter vector, which is # used directly sometimes without being properly filled. 
_ = self.parameters @property def bspline(self): """ Scipy bspline object representation """ from scipy.interpolate import BSpline return BSpline(*self.tck) @bspline.setter def bspline(self, value): from scipy.interpolate import BSpline if isinstance(value, BSpline): self.tck = value.tck else: self.tck = value @property def knots(self): """ Dictionary of knot parameters """ return [getattr(self, knot) for knot in self._knot_names] @property def user_knots(self): """If the knots have been supplied by the user""" return self._user_knots @user_knots.setter def user_knots(self, value): self._user_knots = value @property def coeffs(self): """ Dictionary of coefficient parameters """ return [getattr(self, coeff) for coeff in self._coeff_names] def _init_parameters(self): self._knot_names = self._create_parameters("knot", "t", fixed=True) self._coeff_names = self._create_parameters("coeff", "c") def _init_bounds(self, bounds=None): if bounds is None: bounds = [None, None] if bounds[0] is None: lower = np.zeros(self._degree + 1) else: lower = np.array([bounds[0]] * (self._degree + 1)) if bounds[1] is None: upper = np.ones(self._degree + 1) else: upper = np.array([bounds[1]] * (self._degree + 1)) if bounds[0] is not None and bounds[1] is not None: self.bounding_box = bounds has_bounds = True else: has_bounds = False return has_bounds, lower, upper def _init_knots(self, knots, has_bounds, lower, upper): if np.issubdtype(type(knots), np.integer): self._t = np.concatenate( (lower, np.zeros(knots), upper) ) elif isiterable(knots): self._user_knots = True if has_bounds: self._t = np.concatenate( (lower, np.array(knots), upper) ) else: if len(knots) < 2*(self._degree + 1): raise ValueError(f"Must have at least {2*(self._degree + 1)} knots.") self._t = np.array(knots) else: raise ValueError(f"Knots: {knots} must be iterable or value") # check that knots form a viable spline self.bspline def _init_coeffs(self, coeffs=None): if coeffs is None: self._c = np.zeros(len(self._t)) else: self._c = np.array(coeffs) # check that coeffs form a viable spline self.bspline def _init_data(self, knots, coeffs, bounds=None): self._init_knots(knots, *self._init_bounds(bounds)) self._init_coeffs(coeffs) def evaluate(self, *args, **kwargs): """ Evaluate the spline. Parameters ---------- x : (positional) The points where the model is evaluating the spline at nu : optional (kwarg) The derivative of the spline for evaluation, 0 <= nu <= degree + 1. Default: 0. """ kwargs = super().evaluate(*args, **kwargs) x = args[0] if 'nu' in kwargs: if kwargs['nu'] > self.degree + 1: raise RuntimeError("Cannot evaluate a derivative of " f"order higher than {self.degree + 1}") return self.bspline(x, **kwargs) def derivative(self, nu=1): """ Create a spline that is the derivative of this one Parameters ---------- nu : int, optional Derivative order, default is 1. """ if nu <= self.degree: bspline = self.bspline.derivative(nu=nu) derivative = Spline1D(degree=bspline.k) derivative.bspline = bspline return derivative else: raise ValueError(f'Must have nu <= {self.degree}') def antiderivative(self, nu=1): """ Create a spline that is an antiderivative of this one Parameters ---------- nu : int, optional Antiderivative order, default is 1. 
        Notes
        -----
        Assumes constant of integration is 0
        """
        if (nu + self.degree) <= 5:
            bspline = self.bspline.antiderivative(nu=nu)

            antiderivative = Spline1D(degree=bspline.k)
            antiderivative.bspline = bspline

            return antiderivative
        else:
            raise ValueError("Supported splines can have max degree 5, "
                             f"antiderivative degree will be {nu + self.degree}")


class _SplineFitter(abc.ABC):
    """
    Base Spline Fitter
    """

    def __init__(self):
        self.fit_info = {
            'resid': None,
            'spline': None
        }

    def _set_fit_info(self, spline):
        self.fit_info['resid'] = spline.get_residual()
        self.fit_info['spline'] = spline

    @abc.abstractmethod
    def _fit_method(self, model, x, y, **kwargs):
        raise NotImplementedError("This has not been implemented for _SplineFitter.")

    def __call__(self, model, x, y, z=None, **kwargs):
        model_copy = model.copy()
        if isinstance(model_copy, Spline1D):
            if z is not None:
                raise ValueError("A 1D model can only be fit to x and y data; z must be None.")

            spline = self._fit_method(model_copy, x, y, **kwargs)
        else:
            raise ModelDefinitionError("Only spline models are compatible with this fitter.")

        self._set_fit_info(spline)

        return model_copy


class SplineInterpolateFitter(_SplineFitter):
    """
    Fit an interpolating spline
    """

    def _fit_method(self, model, x, y, **kwargs):
        weights = kwargs.pop('weights', None)
        bbox = kwargs.pop('bbox', [None, None])

        if model.user_knots:
            warnings.warn("The current user-specified knots may be ignored for interpolating data",
                          AstropyUserWarning)
            model.user_knots = False

        if bbox != [None, None]:
            model.bounding_box = bbox

        from scipy.interpolate import InterpolatedUnivariateSpline
        spline = InterpolatedUnivariateSpline(x, y, w=weights,
                                              bbox=bbox, k=model.degree)
        model.tck = spline._eval_args
        return spline


class SplineSmoothingFitter(_SplineFitter):
    """
    Fit a smoothing spline
    """

    def _fit_method(self, model, x, y, **kwargs):
        s = kwargs.pop('s', None)
        weights = kwargs.pop('weights', None)
        bbox = kwargs.pop('bbox', [None, None])

        if model.user_knots:
            warnings.warn("The current user-specified knots may be ignored for smoothing data",
                          AstropyUserWarning)
            model.user_knots = False

        if bbox != [None, None]:
            model.bounding_box = bbox

        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(x, y, w=weights,
                                  bbox=bbox, k=model.degree, s=s)
        model.tck = spline._eval_args
        return spline


class SplineExactKnotsFitter(_SplineFitter):
    """
    Fit a spline using least-squares regression.
    """

    def _fit_method(self, model, x, y, **kwargs):
        t = kwargs.pop('t', None)
        weights = kwargs.pop('weights', None)
        bbox = kwargs.pop('bbox', [None, None])

        if t is not None:
            if model.user_knots:
                warnings.warn("The current user-specified knots will be "
                              "overwritten by knots passed into this function",
                              AstropyUserWarning)
        else:
            if model.user_knots:
                t = model.t_interior
            else:
                raise RuntimeError("No knots have been provided")

        if bbox != [None, None]:
            model.bounding_box = bbox

        from scipy.interpolate import LSQUnivariateSpline
        spline = LSQUnivariateSpline(x, y, t, w=weights,
                                     bbox=bbox, k=model.degree)
        model.tck = spline._eval_args
        return spline


class SplineSplrepFitter(_SplineFitter):
    """
    Fit a spline using the `scipy.interpolate.splrep` function interface.
""" def __init__(self): super().__init__() self.fit_info = { 'fp': None, 'ier': None, 'msg': None } def _fit_method(self, model, x, y, **kwargs): t = kwargs.pop('t', None) s = kwargs.pop('s', None) task = kwargs.pop('task', 0) weights = kwargs.pop('weights', None) bbox = kwargs.pop('bbox', [None, None]) if t is not None: if model.user_knots: warnings.warn("The current user specified knots will be " "overwritten for by knots passed into this function", AstropyUserWarning) else: if model.user_knots: t = model.t_interior if bbox != [None, None]: model.bounding_box = bbox from scipy.interpolate import splrep tck, fp, ier, msg = splrep(x, y, w=weights, xb=bbox[0], xe=bbox[1], k=model.degree, s=s, t=t, task=task, full_output=1) model.tck = tck return fp, ier, msg def _set_fit_info(self, spline): self.fit_info['fp'] = spline[0] self.fit_info['ier'] = spline[1] self.fit_info['msg'] = spline[2]
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name """ Optimization algorithms used in `~astropy.modeling.fitting`. """ import abc import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning __all__ = ["Optimization", "SLSQP", "Simplex"] # Maximum number of iterations DEFAULT_MAXITER = 100 # Step for the forward difference approximation of the Jacobian DEFAULT_EPS = np.sqrt(np.finfo(float).eps) # Default requested accuracy DEFAULT_ACC = 1e-07 DEFAULT_BOUNDS = (-10 ** 12, 10 ** 12) class Optimization(metaclass=abc.ABCMeta): """ Base class for optimizers. Parameters ---------- opt_method : callable Implements optimization method Notes ----- The base Optimizer does not support any constraints by default; individual optimizers should explicitly set this list to the specific constraints it supports. """ supported_constraints = [] def __init__(self, opt_method): self._opt_method = opt_method self._maxiter = DEFAULT_MAXITER self._eps = DEFAULT_EPS self._acc = DEFAULT_ACC @property def maxiter(self): """Maximum number of iterations""" return self._maxiter @maxiter.setter def maxiter(self, val): """Set maxiter""" self._maxiter = val @property def eps(self): """Step for the forward difference approximation of the Jacobian""" return self._eps @eps.setter def eps(self, val): """Set eps value""" self._eps = val @property def acc(self): """Requested accuracy""" return self._acc @acc.setter def acc(self, val): """Set accuracy""" self._acc = val def __repr__(self): fmt = f"{self.__class__.__name__}()" return fmt @property def opt_method(self): """ Return the optimization method.""" return self._opt_method @abc.abstractmethod def __call__(self): raise NotImplementedError("Subclasses should implement this method") class SLSQP(Optimization): """ Sequential Least Squares Programming optimization algorithm. The algorithm is described in [1]_. It supports tied and fixed parameters, as well as bounded constraints. Uses `scipy.optimize.fmin_slsqp`. References ---------- .. [1] http://www.netlib.org/toms/733 """ supported_constraints = ['bounds', 'eqcons', 'ineqcons', 'fixed', 'tied'] def __init__(self): from scipy.optimize import fmin_slsqp super().__init__(fmin_slsqp) self.fit_info = { 'final_func_val': None, 'numiter': None, 'exit_mode': None, 'message': None } def __call__(self, objfunc, initval, fargs, **kwargs): """ Run the solver. 
Parameters ---------- objfunc : callable objective function initval : iterable initial guess for the parameter values fargs : tuple other arguments to be passed to the statistic function kwargs : dict other keyword arguments to be passed to the solver """ kwargs['iter'] = kwargs.pop('maxiter', self._maxiter) if 'epsilon' not in kwargs: kwargs['epsilon'] = self._eps if 'acc' not in kwargs: kwargs['acc'] = self._acc # Get the verbosity level disp = kwargs.pop('verblevel', None) # set the values of constraints to match the requirements of fmin_slsqp model = fargs[0] pars = [getattr(model, name) for name in model.param_names] bounds = [par.bounds for par in pars if not (par.fixed or par.tied)] bounds = np.asarray(bounds) for i in bounds: if i[0] is None: i[0] = DEFAULT_BOUNDS[0] if i[1] is None: i[1] = DEFAULT_BOUNDS[1] # older versions of scipy require this array to be float bounds = np.asarray(bounds, dtype=float) eqcons = np.array(model.eqcons) ineqcons = np.array(model.ineqcons) fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method( objfunc, initval, args=fargs, full_output=True, disp=disp, bounds=bounds, eqcons=eqcons, ieqcons=ineqcons, **kwargs) self.fit_info['final_func_val'] = final_func_val self.fit_info['numiter'] = numiter self.fit_info['exit_mode'] = exit_mode self.fit_info['message'] = mess if exit_mode != 0: warnings.warn("The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning) return fitparams, self.fit_info class Simplex(Optimization): """ Nelder-Mead (downhill simplex) algorithm. This algorithm [1]_ only uses function values, not derivatives. Uses `scipy.optimize.fmin`. References ---------- .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function minimization", The Computer Journal, 7, pp. 308-313 """ supported_constraints = ['bounds', 'fixed', 'tied'] def __init__(self): from scipy.optimize import fmin as simplex super().__init__(simplex) self.fit_info = { 'final_func_val': None, 'numiter': None, 'exit_mode': None, 'num_function_calls': None } def __call__(self, objfunc, initval, fargs, **kwargs): """ Run the solver. Parameters ---------- objfunc : callable objective function initval : iterable initial guess for the parameter values fargs : tuple other arguments to be passed to the statistic function kwargs : dict other keyword arguments to be passed to the solver """ if 'maxiter' not in kwargs: kwargs['maxiter'] = self._maxiter if 'acc' in kwargs: self._acc = kwargs['acc'] kwargs.pop('acc') if 'xtol' in kwargs: self._acc = kwargs['xtol'] kwargs.pop('xtol') # Get the verbosity level disp = kwargs.pop('verblevel', None) fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method( objfunc, initval, args=fargs, xtol=self._acc, disp=disp, full_output=True, **kwargs) self.fit_info['final_func_val'] = final_func_val self.fit_info['numiter'] = numiter self.fit_info['exit_mode'] = exit_mode self.fit_info['num_function_calls'] = funcalls if self.fit_info['exit_mode'] == 1: warnings.warn("The fit may be unsuccessful; " "maximum number of function evaluations reached.", AstropyUserWarning) if self.fit_info['exit_mode'] == 2: warnings.warn("The fit may be unsuccessful; " "maximum number of iterations reached.", AstropyUserWarning) return fitparams, self.fit_info
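# The Optimization classes above are normally driven through astropy's public
# fitters rather than called directly; a minimal sketch assuming
# `SimplexLSQFitter` (which wraps `Simplex`) from astropy.modeling.fitting
# (scipy is required).
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.modeling.fitting import SimplexLSQFitter

x = np.linspace(-5, 5, 101)
y = 3 * np.exp(-0.5 * x ** 2 / 1.2 ** 2)
fitter = SimplexLSQFitter()
fitted = fitter(Gaussian1D(amplitude=1, mean=0, stddev=1), x, y)
# fit_info is populated by Simplex.__call__ above
print(fitter.fit_info['final_func_val'], fitter.fit_info['numiter'])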
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Implements rotations, including spherical rotations as defined in WCS Paper II [1]_ `RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in WCS Paper II to rotate to/from a native sphere and the celestial sphere. The implementation uses `EulerAngleRotation`. The model parameters are three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat`` and ``-(lon_pole-90)``. References ---------- .. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II) """ # pylint: disable=invalid-name, too-many-arguments, no-member import math import numpy as np from astropy import units as u from astropy.coordinates.matrix_utilities import matrix_product, rotation_matrix from .core import Model from .parameters import Parameter from .utils import _to_orig_unit, _to_radian __all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D', 'EulerAngleRotation', 'RotationSequence3D', 'SphericalRotationSequence'] def _create_matrix(angles, axes_order): matrices = [] for angle, axis in zip(angles, axes_order): if isinstance(angle, u.Quantity): angle = angle.value angle = angle.item() matrices.append(rotation_matrix(angle, axis, unit=u.rad)) result = matrix_product(*matrices[::-1]) return result def spherical2cartesian(alpha, delta): alpha = np.deg2rad(alpha) delta = np.deg2rad(delta) x = np.cos(alpha) * np.cos(delta) y = np.cos(delta) * np.sin(alpha) z = np.sin(delta) return np.array([x, y, z]) def cartesian2spherical(x, y, z): h = np.hypot(x, y) alpha = np.rad2deg(np.arctan2(y, x)) delta = np.rad2deg(np.arctan2(z, h)) return alpha, delta class RotationSequence3D(Model): """ Perform a series of rotations about different axes in 3D space. Positive angles represent a counter-clockwise rotation. Parameters ---------- angles : array-like Angles of rotation in deg in the order of axes_order. axes_order : str A sequence of 'x', 'y', 'z' corresponding to the axis of rotation. Examples -------- >>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx') """ standard_broadcasting = False _separable = False n_inputs = 3 n_outputs = 3 angles = Parameter(default=[], getter=_to_orig_unit, setter=_to_radian, description="Angles of rotation in deg in the order of axes_order") def __init__(self, angles, axes_order, name=None): self.axes = ['x', 'y', 'z'] unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError("Unrecognized axis label {0}; " "should be one of {1} ".format(unrecognized, self.axes)) self.axes_order = axes_order if len(angles) != len(axes_order): raise ValueError("The number of angles {0} should match the number of axes {1}." .format(len(angles), len(axes_order))) super().__init__(angles, name=name) self._inputs = ('x', 'y', 'z') self._outputs = ('x', 'y', 'z') @property def inverse(self): """Inverse rotation.""" angles = self.angles.value[::-1] * -1 return self.__class__(angles, axes_order=self.axes_order[::-1]) def evaluate(self, x, y, z, angles): """ Apply the rotation to a set of 3D Cartesian coordinates.
""" if x.shape != y.shape or x.shape != z.shape: raise ValueError("Expected input arrays to have the same shape") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten(), z.flatten()]) result = np.dot(_create_matrix(angles[0], self.axes_order), inarr) x, y, z = result[0], result[1], result[2] x.shape = y.shape = z.shape = orig_shape return x, y, z class SphericalRotationSequence(RotationSequence3D): """ Perform a sequence of rotations about arbitrary number of axes in spherical coordinates. Parameters ---------- angles : list A sequence of angles (in deg). axes_order : str A sequence of characters ('x', 'y', or 'z') corresponding to the axis of rotation and matching the order in ``angles``. """ def __init__(self, angles, axes_order, name=None, **kwargs): self._n_inputs = 2 self._n_outputs = 2 super().__init__(angles, axes_order=axes_order, name=name, **kwargs) self._inputs = ("lon", "lat") self._outputs = ("lon", "lat") @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs def evaluate(self, lon, lat, angles): x, y, z = spherical2cartesian(lon, lat) x1, y1, z1 = super().evaluate(x, y, z, angles) lon, lat = cartesian2spherical(x1, y1, z1) return lon, lat class _EulerRotation: """ Base class which does the actual computation. """ _separable = False def evaluate(self, alpha, delta, phi, theta, psi, axes_order): shape = None if isinstance(alpha, np.ndarray): alpha = alpha.flatten() delta = delta.flatten() shape = alpha.shape inp = spherical2cartesian(alpha, delta) matrix = _create_matrix([phi, theta, psi], axes_order) result = np.dot(matrix, inp) a, b = cartesian2spherical(*result) if shape is not None: a.shape = shape b.shape = shape return a, b _input_units_strict = True _input_units_allow_dimensionless = True @property def input_units(self): """ Input units. """ return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """ Output units. """ return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} class EulerAngleRotation(_EulerRotation, Model): """ Implements Euler angle intrinsic rotations. Rotates one coordinate system into another (fixed) coordinate system. All coordinate systems are right-handed. The sign of the angles is determined by the right-hand rule.. Parameters ---------- phi, theta, psi : float or `~astropy.units.Quantity` ['angle'] "proper" Euler angles in deg. If floats, they should be in deg. axes_order : str A 3 character string, a combination of 'x', 'y' and 'z', where each character denotes an axis in 3D space. 
""" n_inputs = 2 n_outputs = 2 phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="1st Euler angle (Quantity or value in deg)") theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="2nd Euler angle (Quantity or value in deg)") psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="3rd Euler angle (Quantity or value in deg)") def __init__(self, phi, theta, psi, axes_order, **kwargs): self.axes = ['x', 'y', 'z'] if len(axes_order) != 3: raise TypeError( "Expected axes_order to be a character sequence of length 3, " "got {}".format(axes_order)) unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError("Unrecognized axis label {}; " "should be one of {} ".format(unrecognized, self.axes)) self.axes_order = axes_order qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]] if any(qs) and not all(qs): raise TypeError("All parameters should be of the same type - float or Quantity.") super().__init__(phi=phi, theta=theta, psi=psi, **kwargs) self._inputs = ('alpha', 'delta') self._outputs = ('alpha', 'delta') @property def inverse(self): return self.__class__(phi=-self.psi, theta=-self.theta, psi=-self.phi, axes_order=self.axes_order[::-1]) def evaluate(self, alpha, delta, phi, theta, psi): a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order) return a, b class _SkyRotation(_EulerRotation, Model): """ Base class for RotateNative2Celestial and RotateCelestial2Native. """ lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude") lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Longtitude") lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude of a pole") def __init__(self, lon, lat, lon_pole, **kwargs): qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]] if any(qs) and not all(qs): raise TypeError("All parameters should be of the same type - float or Quantity.") super().__init__(lon, lat, lon_pole, **kwargs) self.axes_order = 'zxz' def _evaluate(self, phi, theta, lon, lat, lon_pole): alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order) mask = alpha < 0 if isinstance(mask, np.ndarray): alpha[mask] += 360 else: alpha += 360 return alpha, delta class RotateNative2Celestial(_SkyRotation): """ Transform from Native to Celestial Spherical Coordinates. Parameters ---------- lon : float or `~astropy.units.Quantity` ['angle'] Celestial longitude of the fiducial point. lat : float or `~astropy.units.Quantity` ['angle'] Celestial latitude of the fiducial point. lon_pole : float or `~astropy.units.Quantity` ['angle'] Longitude of the celestial pole in the native system. Notes ----- If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg. Inputs are angles on the native sphere. Outputs are angles on the celestial sphere. """ n_inputs = 2 n_outputs = 2 @property def input_units(self): """ Input units. """ return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """ Output units. 
""" return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def __init__(self, lon, lat, lon_pole, **kwargs): super().__init__(lon, lat, lon_pole, **kwargs) self.inputs = ('phi_N', 'theta_N') self.outputs = ('alpha_C', 'delta_C') def evaluate(self, phi_N, theta_N, lon, lat, lon_pole): """ Parameters ---------- phi_N, theta_N : float or `~astropy.units.Quantity` ['angle'] Angles in the Native coordinate system. it is assumed that numerical only inputs are in degrees. If float, assumed in degrees. lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle'] Parameter values when the model was initialized. If float, assumed in degrees. Returns ------- alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle'] Angles on the Celestial sphere. If float, in degrees. """ # The values are in radians since they have already been through the setter. if isinstance(lon, u.Quantity): lon = lon.value lat = lat.value lon_pole = lon_pole.value # Convert to Euler angles phi = lon_pole - np.pi / 2 theta = - (np.pi / 2 - lat) psi = -(np.pi / 2 + lon) alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi) return alpha_C, delta_C @property def inverse(self): # convert to angles on the celestial sphere return RotateCelestial2Native(self.lon, self.lat, self.lon_pole) class RotateCelestial2Native(_SkyRotation): """ Transform from Celestial to Native Spherical Coordinates. Parameters ---------- lon : float or `~astropy.units.Quantity` ['angle'] Celestial longitude of the fiducial point. lat : float or `~astropy.units.Quantity` ['angle'] Celestial latitude of the fiducial point. lon_pole : float or `~astropy.units.Quantity` ['angle'] Longitude of the celestial pole in the native system. Notes ----- If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg. Inputs are angles on the celestial sphere. Outputs are angles on the native sphere. """ n_inputs = 2 n_outputs = 2 @property def input_units(self): """ Input units. """ return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """ Output units. """ return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def __init__(self, lon, lat, lon_pole, **kwargs): super().__init__(lon, lat, lon_pole, **kwargs) # Inputs are angles on the celestial sphere self.inputs = ('alpha_C', 'delta_C') # Outputs are angles on the native sphere self.outputs = ('phi_N', 'theta_N') def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole): """ Parameters ---------- alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle'] Angles in the Celestial coordinate frame. If float, assumed in degrees. lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle'] Parameter values when the model was initialized. If float, assumed in degrees. Returns ------- phi_N, theta_N : float or `~astropy.units.Quantity` ['angle'] Angles on the Native sphere. If float, in degrees. """ if isinstance(lon, u.Quantity): lon = lon.value lat = lat.value lon_pole = lon_pole.value # Convert to Euler angles phi = (np.pi / 2 + lon) theta = (np.pi / 2 - lat) psi = -(lon_pole - np.pi / 2) phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi) return phi_N, theta_N @property def inverse(self): return RotateNative2Celestial(self.lon, self.lat, self.lon_pole) class Rotation2D(Model): """ Perform a 2D rotation given an angle. Positive angles represent a counter-clockwise rotation and vice-versa. Parameters ---------- angle : float or `~astropy.units.Quantity` ['angle'] Angle of rotation (if float it should be in deg). 
""" n_inputs = 2 n_outputs = 2 _separable = False angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Angle of rotation (Quantity or value in deg)") def __init__(self, angle=angle, **kwargs): super().__init__(angle=angle, **kwargs) self._inputs = ("x", "y") self._outputs = ("x", "y") @property def inverse(self): """Inverse rotation.""" return self.__class__(angle=-self.angle) @classmethod def evaluate(cls, x, y, angle): """ Rotate (x, y) about ``angle``. Parameters ---------- x, y : array-like Input quantities angle : float or `~astropy.units.Quantity` ['angle'] Angle of rotations. If float, assumed in degrees. """ if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") # If one argument has units, enforce they both have units and they are compatible. x_unit = getattr(x, 'unit', None) y_unit = getattr(y, 'unit', None) has_units = x_unit is not None and y_unit is not None if x_unit != y_unit: if has_units and y_unit.is_equivalent(x_unit): y = y.to(x_unit) y_unit = x_unit else: raise u.UnitsError("x and y must have compatible units") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten()]) if isinstance(angle, u.Quantity): angle = angle.to_value(u.rad) result = np.dot(cls._compute_matrix(angle), inarr) x, y = result[0], result[1] x.shape = y.shape = orig_shape if has_units: return u.Quantity(x, unit=x_unit), u.Quantity(y, unit=y_unit) return x, y @staticmethod def _compute_matrix(angle): return np.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], dtype=np.float64)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Define Numpy Ufuncs as Models. """ import numpy as np from astropy.modeling.core import Model trig_ufuncs = ["sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2", "hypot", "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", "deg2rad", "rad2deg"] math_ops = ["add", "subtract", "multiply", "logaddexp", "logaddexp2", "true_divide", "floor_divide", "negative", "positive", "power", "remainder", "fmod", "divmod", "absolute", "fabs", "rint", "exp", "exp2", "log", "log2", "log10", "expm1", "log1p", "sqrt", "square", "cbrt", "reciprocal", "divide", "mod"] supported_ufuncs = trig_ufuncs + math_ops # These names are just aliases for other ufunc objects # in the numpy API. The alias name must occur later # in the lists above. alias_ufuncs = { "divide": "true_divide", "mod": "remainder", } class _NPUfuncModel(Model): _is_dynamic = True def __init__(self, **kwargs): super().__init__(**kwargs) def _make_class_name(name): """ Make a ufunc model class name from the name of the ufunc. """ return name[0].upper() + name[1:] + 'Ufunc' def ufunc_model(name): """ Define a Model from a Numpy ufunc name.""" ufunc = getattr(np, name) nin = ufunc.nin nout = ufunc.nout if nin == 1: separable = True def evaluate(self, x): return self.func(x) else: separable = False def evaluate(self, x, y): return self.func(x, y) klass_name = _make_class_name(name) members = {'n_inputs': nin, 'n_outputs': nout, 'func': ufunc, 'linear': False, 'fittable': False, '_separable': separable, '_is_dynamic': True, 'evaluate': evaluate} klass = type(str(klass_name), (_NPUfuncModel,), members) klass.__module__ = 'astropy.modeling.math_functions' return klass __all__ = [] for name in supported_ufuncs: if name in alias_ufuncs: klass_name = _make_class_name(name) alias_klass_name = _make_class_name(alias_ufuncs[name]) globals()[klass_name] = globals()[alias_klass_name] __all__.append(klass_name) else: m = ufunc_model(name) klass_name = m.__name__ globals()[klass_name] = m __all__.append(klass_name)
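# Each supported numpy ufunc above becomes a Model subclass named by
# _make_class_name, e.g. np.sin -> SinUfunc and np.add -> AddUfunc; a minimal
# sketch assuming the module import path astropy.modeling.math_functions.
import numpy as np
from astropy.modeling import math_functions as mf

sin_model = mf.SinUfunc()
assert np.allclose(sin_model(np.pi / 2), 1.0)

# Two-input ufuncs become two-input models and compose like any other Model:
add_model = mf.AddUfunc()
assert np.allclose(add_model(2.0, 3.0), 5.0)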
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions to determine if a model is separable, i.e. if the model outputs are independent. It analyzes ``n_inputs``, ``n_outputs`` and the operators in a compound model by stepping through the transforms and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``). Each modeling operator is represented by a function which takes two simple models (or two ``coord_matrix`` arrays) and returns an array of shape (``n_outputs``, ``n_inputs``). """ import numpy as np from .core import CompoundModel, Model, ModelDefinitionError from .mappings import Mapping __all__ = ["is_separable", "separability_matrix"] def is_separable(transform): """ A separability test for the outputs of a transform. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- is_separable : ndarray A boolean array with size ``transform.n_outputs`` where each element indicates whether the output is independent and the result of a separable transform. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D, Mapping >>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([ True, True]...) >>> is_separable(Shift(1) & Shift(2) | Rotation2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([ True, True, True, True]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: is_separable = np.array([False] * transform.n_outputs).T return is_separable separable_matrix = _separable(transform) is_separable = separable_matrix.sum(1) is_separable = np.where(is_separable != 1, False, True) return is_separable def separability_matrix(transform): """ Compute the correlation between outputs and inputs. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- separable_matrix : ndarray A boolean correlation matrix of shape (n_outputs, n_inputs). Indicates the dependence of outputs on inputs. For completely independent outputs, the diagonal elements are True and off-diagonal elements are False. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D, Mapping >>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([[ True, False], [False, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([[ True, False], [False, True], [ True, False], [False, True]]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_) separable_matrix = _separable(transform) separable_matrix = np.where(separable_matrix != 0, True, False) return separable_matrix def _compute_n_outputs(left, right): """ Compute the number of outputs of two models. The two models are the left and right model to an operation in the expression tree of a compound model. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is an array, it is the output of `coord_matrix`.
""" if isinstance(left, Model): lnout = left.n_outputs else: lnout = left.shape[0] if isinstance(right, Model): rnout = right.n_outputs else: rnout = right.shape[0] noutp = lnout + rnout return noutp def _arith_oper(left, right): """ Function corresponding to one of the arithmetic operators ['+', '-'. '*', '/', '**']. This always returns a nonseparable output. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ # models have the same number of inputs and outputs def _n_inputs_outputs(input): if isinstance(input, Model): n_outputs, n_inputs = input.n_outputs, input.n_inputs else: n_outputs, n_inputs = input.shape return n_inputs, n_outputs left_inputs, left_outputs = _n_inputs_outputs(left) right_inputs, right_outputs = _n_inputs_outputs(right) if left_inputs != right_inputs or left_outputs != right_outputs: raise ModelDefinitionError( "Unsupported operands for arithmetic operator: left (n_inputs={}, " "n_outputs={}) and right (n_inputs={}, n_outputs={}); " "models must have the same n_inputs and the same " "n_outputs for this operator.".format( left_inputs, left_outputs, right_inputs, right_outputs)) result = np.ones((left_outputs, left_inputs)) return result def _coord_matrix(model, pos, noutp): """ Create an array representing inputs and outputs of a simple model. The array has a shape (noutp, model.n_inputs). Parameters ---------- model : `astropy.modeling.Model` model pos : str Position of this model in the expression tree. One of ['left', 'right']. noutp : int Number of outputs of the compound model of which the input model is a left or right child. """ if isinstance(model, Mapping): axes = [] for i in model.mapping: axis = np.zeros((model.n_inputs,)) axis[i] = 1 axes.append(axis) m = np.vstack(axes) mat = np.zeros((noutp, model.n_inputs)) if pos == 'left': mat[: model.n_outputs, :model.n_inputs] = m else: mat[-model.n_outputs:, -model.n_inputs:] = m return mat if not model.separable: # this does not work for more than 2 coordinates mat = np.zeros((noutp, model.n_inputs)) if pos == 'left': mat[:model.n_outputs, : model.n_inputs] = 1 else: mat[-model.n_outputs:, -model.n_inputs:] = 1 else: mat = np.zeros((noutp, model.n_inputs)) for i in range(model.n_inputs): mat[i, i] = 1 if pos == 'right': mat = np.roll(mat, (noutp - model.n_outputs)) return mat def _cstack(left, right): """ Function corresponding to '&' operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ noutp = _compute_n_outputs(left, right) if isinstance(left, Model): cleft = _coord_matrix(left, 'left', noutp) else: cleft = np.zeros((noutp, left.shape[1])) cleft[: left.shape[0], : left.shape[1]] = left if isinstance(right, Model): cright = _coord_matrix(right, 'right', noutp) else: cright = np.zeros((noutp, right.shape[1])) cright[-right.shape[0]:, -right.shape[1]:] = right return np.hstack([cleft, cright]) def _cdot(left, right): """ Function corresponding to "|" operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ left, right = right, left def _n_inputs_outputs(input, position): """ Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix. 
""" if isinstance(input, Model): coords = _coord_matrix(input, position, input.n_outputs) else: coords = input return coords cleft = _n_inputs_outputs(left, 'left') cright = _n_inputs_outputs(right, 'right') try: result = np.dot(cleft, cright) except ValueError: raise ModelDefinitionError( 'Models cannot be combined with the "|" operator; ' 'left coord_matrix is {}, right coord_matrix is {}'.format( cright, cleft)) return result def _separable(transform): """ Calculate the separability of outputs. Parameters ---------- transform : `astropy.modeling.Model` A transform (usually a compound model). Returns : is_separable : ndarray of dtype np.bool An array of shape (transform.n_outputs,) of boolean type Each element represents the separablity of the corresponding output. """ if (transform_matrix := transform._calculate_separability_matrix()) is not NotImplemented: return transform_matrix elif isinstance(transform, CompoundModel): sepleft = _separable(transform.left) sepright = _separable(transform.right) return _operators[transform.op](sepleft, sepright) elif isinstance(transform, Model): return _coord_matrix(transform, 'left', transform.n_outputs) # Maps modeling operators to a function computing and represents the # relationship of axes as an array of 0-es and 1-s _operators = {'&': _cstack, '|': _cdot, '+': _arith_oper, '-': _arith_oper, '*': _arith_oper, '/': _arith_oper, '**': _arith_oper}
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides utility functions for the models package. """ import warnings # pylint: disable=invalid-name from collections import UserDict from collections.abc import MutableMapping from inspect import signature import numpy as np from astropy import units as u from astropy.utils.decorators import deprecated __doctest_skip__ = ['AliasDict'] __all__ = ['AliasDict', 'poly_map_domain', 'comb', 'ellipse_extent'] deprecation_msg = """ AliasDict is deprecated because it no longer serves a function anywhere inside astropy. """ @deprecated('5.0', deprecation_msg) class AliasDict(MutableMapping): """ Creates a `dict` like object that wraps an existing `dict` or other `MutableMapping`, along with a `dict` of *key aliases* that translate specific keys in this dict to different keys in the underlying dict. In other words, keys that do not have an associated alias are accessed and stored like a normal `dict`. However, a key that has an alias is accessed and stored to the "parent" dict via the alias. Parameters ---------- parent : dict-like The parent `dict` that aliased keys are accessed from and stored to. aliases : dict-like Maps keys in this dict to their associated keys in the parent dict. Examples -------- >>> parent = {'a': 1, 'b': 2, 'c': 3} >>> aliases = {'foo': 'a', 'bar': 'c'} >>> alias_dict = AliasDict(parent, aliases) >>> alias_dict['foo'] 1 >>> alias_dict['bar'] 3 Keys in the original parent dict are not visible if they were not aliased: >>> alias_dict['b'] Traceback (most recent call last): ... KeyError: 'b' Likewise, updates to aliased keys are reflected back in the parent dict: >>> alias_dict['foo'] = 42 >>> alias_dict['foo'] 42 >>> parent['a'] 42 However, updates/insertions to keys that are *not* aliased are not reflected in the parent dict: >>> alias_dict['qux'] = 99 >>> alias_dict['qux'] 99 >>> 'qux' in parent False In particular, updates on the `AliasDict` to a key that is equal to one of the aliased keys in the parent dict does *not* update the parent dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But assigning to a key ``'a'`` on the `AliasDict` does not impact the parent: >>> alias_dict['a'] = 'nope' >>> alias_dict['a'] 'nope' >>> parent['a'] 42 """ _store_type = dict """ Subclasses may override this to use other mapping types as the underlying storage, for example an `OrderedDict`. However, even in this case additional work may be needed to get things like the ordering right. """ def __init__(self, parent, aliases): self._parent = parent self._store = self._store_type() self._aliases = dict(aliases) def __getitem__(self, key): if key in self._aliases: try: return self._parent[self._aliases[key]] except KeyError: raise KeyError(key) return self._store[key] def __setitem__(self, key, value): if key in self._aliases: self._parent[self._aliases[key]] = value else: self._store[key] = value def __delitem__(self, key): if key in self._aliases: try: del self._parent[self._aliases[key]] except KeyError: raise KeyError(key) else: del self._store[key] def __iter__(self): """ First iterates over keys from the parent dict (if the aliased keys are present in the parent), followed by any keys in the local store.
""" for key, alias in self._aliases.items(): if alias in self._parent: yield key for key in self._store: yield key def __len__(self): return len(list(iter(self))) def __repr__(self): # repr() just like any other dict--this should look transparent store_copy = self._store_type() for key, alias in self._aliases.items(): if alias in self._parent: store_copy[key] = self._parent[alias] store_copy.update(self._store) return repr(store_copy) def make_binary_operator_eval(oper, f, g): """ Given a binary operator (as a callable of two arguments) ``oper`` and two callables ``f`` and ``g`` which accept the same arguments, returns a *new* function that takes the same arguments as ``f`` and ``g``, but passes the outputs of ``f`` and ``g`` in the given ``oper``. ``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The given operator is applied element-wise to tuple outputs). Example ------- >>> from operator import add >>> def prod(x, y): ... return (x * y,) ... >>> sum_of_prod = make_binary_operator_eval(add, prod, prod) >>> sum_of_prod(3, 5) (30,) """ return lambda inputs, params: \ tuple(oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params))) def poly_map_domain(oldx, domain, window): """ Map domain into window by shifting and scaling. Parameters ---------- oldx : array original coordinates domain : list or tuple of length 2 function domain window : list or tuple of length 2 range into which to map the domain """ domain = np.array(domain, dtype=np.float64) window = np.array(window, dtype=np.float64) if domain.shape != (2,) or window.shape != (2,): raise ValueError('Expected "domain" and "window" to be a tuple of size 2.') scl = (window[1] - window[0]) / (domain[1] - domain[0]) off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0]) return off + scl * oldx def _validate_domain_window(value): if value is not None: if np.asanyarray(value).shape != (2, ): raise ValueError('domain and window should be tuples of size 2.') return tuple(value) return value def comb(N, k): """ The number of combinations of N things taken k at a time. Parameters ---------- N : int, array Number of things. k : int, array Number of elements taken. """ if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for j in range(min(k, N - k)): val = (val * (N - j)) / (j + 1) return val def array_repr_oneline(array): """ Represents a multi-dimensional Numpy array flattened onto a single line. """ r = np.array2string(array, separator=', ', suppress_small=True) return ' '.join(l.strip() for l in r.splitlines()) def combine_labels(left, right): """ For use with the join operator &: Combine left input/output labels with right input/output labels. If none of the labels conflict then this just returns a sum of tuples. However if *any* of the labels conflict, this appends '0' to the left-hand labels and '1' to the right-hand labels so there is no ambiguity). """ if set(left).intersection(right): left = tuple(l + '0' for l in left) right = tuple(r + '1' for r in right) return left + right def ellipse_extent(a, b, theta): """ Calculates the half size of a box encapsulating a rotated 2D ellipse. Parameters ---------- a : float or `~astropy.units.Quantity` The ellipse semimajor axis. b : float or `~astropy.units.Quantity` The ellipse semiminor axis. theta : float or `~astropy.units.Quantity` ['angle'] The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise. 
Returns ------- offsets : tuple The absolute value of the offset distances from the ellipse center that define its bounding box region, ``(dx, dy)``. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Ellipse2D from astropy.modeling.utils import ellipse_extent, render_model amplitude = 1 x0 = 50 y0 = 50 a = 30 b = 10 theta = np.pi / 4 model = Ellipse2D(amplitude, x0, y0, a, b, theta) dx, dy = ellipse_extent(a, b, theta) limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy] model.bounding_box = limits image = render_model(model) plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5, extent = limits) plt.show() """ t = np.arctan2(-b * np.tan(theta), a) dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta) t = np.arctan2(b, a * np.tan(theta)) dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta) if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity): return np.abs(u.Quantity([dx, dy])) return np.abs([dx, dy]) def get_inputs_and_params(func): """ Given a callable, determine the input variables and the parameters. Parameters ---------- func : callable Returns ------- inputs, params : tuple Each entry is a list of inspect.Parameter objects """ sig = signature(func) inputs = [] params = [] for param in sig.parameters.values(): if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD): raise ValueError("Signature must not have *args or **kwargs") if param.default == param.empty: inputs.append(param) else: params.append(param) return inputs, params def _combine_equivalency_dict(keys, eq1=None, eq2=None): # Given two dictionaries that give equivalencies for a set of keys, for # example input value names, return a dictionary that includes all the # equivalencies eq = {} for key in keys: eq[key] = [] if eq1 is not None and key in eq1: eq[key].extend(eq1[key]) if eq2 is not None and key in eq2: eq[key].extend(eq2[key]) return eq def _to_radian(value): """ Convert ``value`` to radian. """ if isinstance(value, u.Quantity): return value.to(u.rad) return np.deg2rad(value) def _to_orig_unit(value, raw_unit=None, orig_unit=None): """ Convert value with ``raw_unit`` to ``orig_unit``. """ if raw_unit is not None: return (value * raw_unit).to(orig_unit) return np.rad2deg(value) class _ConstraintsDict(UserDict): """ Wrapper around UserDict to allow updating the constraints on a Parameter when the dictionary is updated. """ def __init__(self, model, constraint_type): self._model = model self.constraint_type = constraint_type c = {} for name in model.param_names: param = getattr(model, name) c[name] = getattr(param, constraint_type) super().__init__(c) def __setitem__(self, key, val): super().__setitem__(key, val) param = getattr(self._model, key) setattr(param, self.constraint_type, val) class _SpecialOperatorsDict(UserDict): """ Wrapper around UserDict to allow for better tracking of the Special Operators for CompoundModels. This dictionary is structured so that one cannot inadvertently overwrite an existing special operator. Parameters ---------- unique_id: int the last used unique_id for a SPECIAL OPERATOR special_operators: dict a dictionary containing the special_operators Notes ----- Direct setting of operators (`dict[key] = value`) into the dictionary has been deprecated in favor of the `.add(name, value)` method, so that unique dictionary keys can be generated and tracked consistently. 
""" def __init__(self, unique_id=0, special_operators={}): super().__init__(special_operators) self._unique_id = unique_id def _set_value(self, key, val): if key in self: raise ValueError(f'Special operator "{key}" already exists') else: super().__setitem__(key, val) def __setitem__(self, key, val): self._set_value(key, val) warnings.warn(DeprecationWarning( """ Special operator dictionary assignment has been deprecated. Please use `.add` instead, so that you can capture a unique key for your operator. """ )) def _get_unique_id(self): self._unique_id += 1 return self._unique_id def add(self, operator_name, operator): """ Adds a special operator to the dictionary, and then returns the unique key that the operator is stored under for later reference. Parameters ---------- operator_name: str the name for the operator operator: function the actual operator function which will be used Returns ------- the unique operator key for the dictionary `(operator_name, unique_id)` """ key = (operator_name, self._get_unique_id()) self._set_value(key, operator) return key
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains an improved bounding box """ import abc import copy import warnings from collections import namedtuple from typing import Any, Callable, Dict, List, Tuple import numpy as np from astropy.units import Quantity from astropy.utils import isiterable __all__ = ['ModelBoundingBox', 'CompoundBoundingBox'] _BaseInterval = namedtuple('_BaseInterval', "lower upper") class _Interval(_BaseInterval): """ A single input's bounding box interval. Parameters ---------- lower : float The lower bound of the interval upper : float The upper bound of the interval Methods ------- validate : Constructs a valid interval outside : Determine which parts of an input array are outside the interval. domain : Constructs a discretization of the points inside the interval. """ def __repr__(self): return f"Interval(lower={self.lower}, upper={self.upper})" def copy(self): return copy.deepcopy(self) @staticmethod def _validate_shape(interval): """Validate the shape of an interval representation""" MESSAGE = """An interval must be some sort of sequence of length 2""" try: shape = np.shape(interval) except TypeError: try: # np.shape does not work with lists of Quantities if len(interval) == 1: interval = interval[0] shape = np.shape([b.to_value() for b in interval]) except (ValueError, TypeError, AttributeError): raise ValueError(MESSAGE) valid_shape = shape in ((2,), (1, 2), (2, 0)) if not valid_shape: valid_shape = (len(shape) > 0) and (shape[0] == 2) and \ all(isinstance(b, np.ndarray) for b in interval) if not isiterable(interval) or not valid_shape: raise ValueError(MESSAGE) @classmethod def _validate_bounds(cls, lower, upper): """Validate the bounds are reasonable and construct an interval from them.""" if (np.asanyarray(lower) > np.asanyarray(upper)).all(): warnings.warn(f"Invalid interval: upper bound {upper} " f"is strictly less than lower bound {lower}.", RuntimeWarning) return cls(lower, upper) @classmethod def validate(cls, interval): """ Construct and validate an interval Parameters ---------- interval : iterable A representation of the interval. Returns ------- A validated interval. """ cls._validate_shape(interval) if len(interval) == 1: interval = tuple(interval[0]) else: interval = tuple(interval) return cls._validate_bounds(interval[0], interval[1]) def outside(self, _input: np.ndarray): """ Parameters ---------- _input : np.ndarray The evaluation input in the form of an array. Returns ------- Boolean array indicating which parts of _input are outside the interval: True -> position outside interval False -> position inside interval """ return np.logical_or(_input < self.lower, _input > self.upper) def domain(self, resolution): return np.arange(self.lower, self.upper + resolution, resolution) # The interval where all ignored inputs can be found. _ignored_interval = _Interval.validate((-np.inf, np.inf)) def get_index(model, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself.
""" if isinstance(key, str): if key in model.inputs: index = model.inputs.index(key) else: raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.") elif np.issubdtype(type(key), np.integer): if 0 <= key < len(model.inputs): index = key else: raise IndexError(f"Integer key: {key} must be non-negative and < {len(model.inputs)}.") else: raise ValueError(f"Key value: {key} must be string or integer.") return index def get_name(model, index: int): """Get the input name corresponding to the input index""" return model.inputs[index] class _BoundingDomain(abc.ABC): """ Base class for ModelBoundingBox and CompoundBoundingBox. This is where all the `~astropy.modeling.core.Model` evaluation code for evaluating with a bounding box is because it is common to both types of bounding box. Parameters ---------- model : `~astropy.modeling.Model` The Model this bounding domain is for. prepare_inputs : Generates the necessary input information so that model can be evaluated only for input points entirely inside bounding_box. This needs to be implemented by a subclass. Note that most of the implementation is in ModelBoundingBox. prepare_outputs : Fills the output values in for any input points outside the bounding_box. evaluate : Performs a complete model evaluation while enforcing the bounds on the inputs and returns a complete output. """ def __init__(self, model, ignored: List[int] = None, order: str = 'C'): self._model = model self._ignored = self._validate_ignored(ignored) self._order = self._get_order(order) @property def model(self): return self._model @property def order(self) -> str: return self._order @property def ignored(self) -> List[int]: return self._ignored def _get_order(self, order: str = None) -> str: """ Get if bounding_box is C/python ordered or Fortran/mathematically ordered """ if order is None: order = self._order if order not in ('C', 'F'): raise ValueError("order must be either 'C' (C/python order) or " f"'F' (Fortran/mathematical order), got: {order}.") return order def _get_index(self, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself. """ return get_index(self._model, key) def _get_name(self, index: int): """Get the input name corresponding to the input index""" return get_name(self._model, index) @property def ignored_inputs(self) -> List[str]: return [self._get_name(index) for index in self._ignored] def _validate_ignored(self, ignored: list) -> List[int]: if ignored is None: return [] else: return [self._get_index(key) for key in ignored] def __call__(self, *args, **kwargs): raise NotImplementedError( "This bounding box is fixed by the model and does not have " "adjustable parameters.") @abc.abstractmethod def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ raise NotImplementedError("This should be implemented by a child class.") @abc.abstractmethod def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. 
Parameters ---------- input_shape : tuple The shape that all inputs have been reshaped/broadcast into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ raise NotImplementedError("This has not been implemented for BoundingDomain.") @staticmethod def _base_output(input_shape, fill_value): """ Create a baseline output, assuming that the entire input is outside the bounding box Parameters ---------- input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An array of the correct shape containing all fill_value """ return np.zeros(input_shape) + fill_value def _all_out_output(self, input_shape, fill_value): """ Create output if all inputs are outside the domain Parameters ---------- input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- A full set of outputs for case that all inputs are outside domain. """ return [self._base_output(input_shape, fill_value) for _ in range(self._model.n_outputs)], None def _modify_output(self, valid_output, valid_index, input_shape, fill_value): """ For a single output fill in all the parts corresponding to inputs outside the bounding box. Parameters ---------- valid_output : numpy array The output from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An output array with all the indices corresponding to inputs outside the bounding box filled in by fill_value """ output = self._base_output(input_shape, fill_value) if not output.shape: output = np.array(valid_output) else: output[valid_index] = valid_output if np.isscalar(valid_output): output = output.item(0) return output def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- List of filled in output arrays. """ outputs = [] for valid_output in valid_outputs: outputs.append(self._modify_output(valid_output, valid_index, input_shape, fill_value)) return outputs def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box, adjusting any single output model so that its output becomes a list containing that output.
Parameters ---------- valid_outputs : list The list of outputs from the model corresponding to inputs inside the bounding box valid_index : array_like array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box """ if self._model.n_outputs == 1: valid_outputs = [valid_outputs] return self._prepare_outputs(valid_outputs, valid_index, input_shape, fill_value) @staticmethod def _get_valid_outputs_unit(valid_outputs, with_units: bool): """ Get the unit for outputs if one is required. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box with_units : bool whether or not a unit is required """ if with_units: return getattr(valid_outputs, 'unit', None) def _evaluate_model(self, evaluate: Callable, valid_inputs, valid_index, input_shape, fill_value, with_units: bool): """ Evaluate the model using the given evaluate routine Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model valid_inputs : list of numpy arrays The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : numpy array array of all indices inside the bounding box input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_outputs = evaluate(valid_inputs) valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units) return self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value), valid_outputs_unit def _evaluate(self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool): """ Perform model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model inputs : list List of all the model inputs input_shape : tuple The shape that all inputs have been reshaped/broadcast into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs) if all_out: return self._all_out_output(input_shape, fill_value) else: return self._evaluate_model(evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units) @staticmethod def _set_outputs_unit(outputs, valid_outputs_unit): """ Set the units on the outputs prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs Returns ------- List containing filled in output values and units """ if valid_outputs_unit is not None: return Quantity(outputs, valid_outputs_unit, copy=False) return outputs
def evaluate(self, evaluate: Callable, inputs, fill_value): """ Perform full model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- evaluate : callable callable which takes in the valid inputs to evaluate model inputs : list List of all the model inputs fill_value : float The value which will be assigned to inputs which are outside the bounding box """ input_shape = self._model.input_shape(inputs) # NOTE: CompoundModel does not currently support units during # evaluation for bounding_box so this feature is turned off # for CompoundModel(s). outputs, valid_outputs_unit = self._evaluate(evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units) return tuple(self._set_outputs_unit(outputs, valid_outputs_unit)) class ModelBoundingBox(_BoundingDomain): """ A model's bounding box Parameters ---------- intervals : dict A dictionary containing all the intervals for each model input keys -> input index values -> interval for that index model : `~astropy.modeling.Model` The Model this bounding_box is for. ignored : list A list containing all the inputs (index) which will not be checked for whether or not their elements are in/out of an interval. order : optional, str The ordering that is assumed for the tuple representation of this bounding_box. Options: 'C': C/Python order, e.g. z, y, x. (default), 'F': Fortran/mathematical notation order, e.g. x, y, z. """ def __init__(self, intervals: Dict[int, _Interval], model, ignored: List[int] = None, order: str = 'C'): super().__init__(model, ignored, order) self._intervals = {} if intervals != () and intervals != {}: self._validate(intervals, order=order) def copy(self, ignored=None): intervals = {index: interval.copy() for index, interval in self._intervals.items()} if ignored is None: ignored = self._ignored.copy() return ModelBoundingBox(intervals, self._model, ignored=ignored, order=self._order) @property def intervals(self) -> Dict[int, _Interval]: """Return bounding_box labeled using input positions""" return self._intervals @property def named_intervals(self) -> Dict[str, _Interval]: """Return bounding_box labeled using input names""" return {self._get_name(index): bbox for index, bbox in self._intervals.items()} def __repr__(self): parts = [ 'ModelBoundingBox(', ' intervals={' ] for name, interval in self.named_intervals.items(): parts.append(f" {name}: {interval}") parts.append(' }') if len(self._ignored) > 0: parts.append(f" ignored={self.ignored_inputs}") parts.append(f' model={self._model.__class__.__name__}(inputs={self._model.inputs})') parts.append(f" order='{self._order}'") parts.append(')') return '\n'.join(parts) def __len__(self): return len(self._intervals) def __contains__(self, key): try: index = self._get_index(key) return index in self._intervals or index in self._ignored except (IndexError, ValueError): return False def has_interval(self, key): return self._get_index(key) in self._intervals def __getitem__(self, key): """Get bounding_box entries by either input name or input index""" index = self._get_index(key) if index in self._ignored: return _ignored_interval else: return self._intervals[self._get_index(key)] def bounding_box(self, order: str = None): """ Return the old tuple of tuples representation of the bounding_box order='C' corresponds to the old bounding_box ordering order='F' corresponds to the gwcs bounding_box
ordering. """ if len(self._intervals) == 1: return tuple(list(self._intervals.values())[0]) else: order = self._get_order(order) inputs = self._model.inputs if order == 'C': inputs = inputs[::-1] bbox = tuple([tuple(self[input_name]) for input_name in inputs]) if len(bbox) == 1: bbox = bbox[0] return bbox def __eq__(self, value): """Note equality can be either with old representation or new one.""" if isinstance(value, tuple): return self.bounding_box() == value elif isinstance(value, ModelBoundingBox): return (self.intervals == value.intervals) and (self.ignored == value.ignored) else: return False def __setitem__(self, key, value): """Validate and store interval under key (input index or input name).""" index = self._get_index(key) if index in self._ignored: self._ignored.remove(index) self._intervals[index] = _Interval.validate(value) def __delitem__(self, key): """Delete stored interval""" index = self._get_index(key) if index in self._ignored: raise RuntimeError(f"Cannot delete ignored input: {key}!") del self._intervals[index] self._ignored.append(index) def _validate_dict(self, bounding_box: dict): """Validate passing dictionary of intervals and setting them.""" for key, value in bounding_box.items(): self[key] = value def _validate_sequence(self, bounding_box, order: str = None): """Validate passing tuple of tuples representation (or related) and setting them.""" order = self._get_order(order) if order == 'C': # If bounding_box is C/python ordered, it needs to be reversed # to be in Fortran/mathematical/input order. bounding_box = bounding_box[::-1] for index, value in enumerate(bounding_box): self[index] = value @property def _n_inputs(self) -> int: n_inputs = self._model.n_inputs - len(self._ignored) if n_inputs > 0: return n_inputs else: return 0 def _validate_iterable(self, bounding_box, order: str = None): """Validate and set any iterable representation""" if len(bounding_box) != self._n_inputs: raise ValueError(f"Found {len(bounding_box)} intervals, " f"but must have exactly {self._n_inputs}.") if isinstance(bounding_box, dict): self._validate_dict(bounding_box) else: self._validate_sequence(bounding_box, order) def _validate(self, bounding_box, order: str = None): """Validate and set any representation""" if self._n_inputs == 1 and not isinstance(bounding_box, dict): self[0] = bounding_box else: self._validate_iterable(bounding_box, order) @classmethod def validate(cls, model, bounding_box, ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwargs): """ Construct a valid bounding box for a model. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict, tuple A possible representation of the bounding box order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, ModelBoundingBox): order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.intervals new = cls({}, model, ignored=ignored, order=order) new._validate(bounding_box) return new def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. 
keep_ignored : bool Keep the ignored inputs of the bounding box (internal argument only) """ new = self.copy() for _input in fixed_inputs.keys(): del new[_input] if _keep_ignored: ignored = new.ignored else: ignored = None return ModelBoundingBox.validate(model, new.named_intervals, ignored=ignored, order=new._order) @property def dimension(self): return len(self) def domain(self, resolution, order: str = None): inputs = self._model.inputs order = self._get_order(order) if order == 'C': inputs = inputs[::-1] return [self[input_name].domain(resolution) for input_name in inputs] def _outside(self, input_shape, inputs): """ Get all the input positions which are outside the bounding_box, so that the corresponding outputs can be filled with the fill value (default NaN). Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- outside_index : bool-numpy array True -> position outside bounding_box False -> position inside bounding_box all_out : bool if all of the inputs are outside the bounding_box """ all_out = False outside_index = np.zeros(input_shape, dtype=bool) for index, _input in enumerate(inputs): _input = np.asanyarray(_input) outside = np.broadcast_to(self[index].outside(_input), input_shape) outside_index[outside] = True if outside_index.all(): all_out = True break return outside_index, all_out def _valid_index(self, input_shape, inputs): """ Get the indices of all the inputs inside the bounding_box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_index : numpy array array of all indices inside the bounding box all_out : bool if all of the inputs are outside the bounding_box """ outside_index, all_out = self._outside(input_shape, inputs) valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero() if len(valid_index[0]) == 0: all_out = True return valid_index, all_out def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ valid_index, all_out = self._valid_index(input_shape, inputs) valid_inputs = [] if not all_out: for _input in inputs: if input_shape: valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[valid_index] if np.isscalar(_input): valid_input = valid_input.item(0) valid_inputs.append(valid_input) else: valid_inputs.append(_input) return tuple(valid_inputs), valid_index, all_out _BaseSelectorArgument = namedtuple('_BaseSelectorArgument', "index ignore") class _SelectorArgument(_BaseSelectorArgument): """ Contains a single CompoundBoundingBox slicing input. Parameters ---------- index : int The index of the input in the input list ignore : bool Whether or not this input will be ignored by the bounding box. Methods ------- validate : Returns a valid SelectorArgument for a given model. get_selector : Returns the value of the input for use in finding the correct bounding_box. get_fixed_value : Gets the slicing value from a fix_inputs set of values. 
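    Examples
    --------
    A minimal sketch; ``Gaussian2D`` is an arbitrary two-input model chosen
    purely for illustration::

        >>> from astropy.modeling.models import Gaussian2D
        >>> arg = _SelectorArgument.validate(Gaussian2D(), 'y')
        >>> arg.index, arg.ignore
        (1, True)
        >>> arg.get_selector(0.0, 2.5)
        2.5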
""" def __new__(cls, index, ignore): self = super().__new__(cls, index, ignore) return self @classmethod def validate(cls, model, argument, ignored: bool = True): """ Construct a valid selector argument for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be an argument for. argument : int or str A representation of which evaluation input to use ignored : optional, bool Whether or not to ignore this argument in the ModelBoundingBox. Returns ------- Validated selector_argument """ return cls(get_index(model, argument), ignored) def get_selector(self, *inputs): """ Get the selector value corresponding to this argument Parameters ---------- *inputs : All the processed model evaluation inputs. """ _selector = inputs[self.index] if isiterable(_selector): if len(_selector) == 1: return _selector[0] else: return tuple(_selector) return _selector def name(self, model) -> str: """ Get the name of the input described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return get_name(model, self.index) def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return f"Argument(name='{self.name(model)}', ignore={self.ignore})" def get_fixed_value(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. values : dict Dictionary of fixed inputs. """ if self.index in values: return values[self.index] else: if self.name(model) in values: return values[self.name(model)] else: raise RuntimeError(f"{self.pretty_repr(model)} was not found in {values}") def is_argument(self, model, argument) -> bool: """ Determine if passed argument is described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. argument : int or str A representation of which evaluation input is being used """ return self.index == get_index(model, argument) def named_tuple(self, model): """ Get a tuple representation of this argument using the input name from the model. Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return (self.name(model), self.ignore) class _SelectorArguments(tuple): """ Contains the CompoundBoundingBox slicing description Parameters ---------- input_ : The SelectorArgument values Methods ------- validate : Returns a valid SelectorArguments for its model. get_selector : Returns the selector a set of inputs corresponds to. is_selector : Determines if a selector is correctly formatted for this CompoundBoundingBox. get_fixed_value : Gets the selector from a fix_inputs set of values. """ _kept_ignore = None def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None): self = super().__new__(cls, input_) if kept_ignore is None: self._kept_ignore = [] else: self._kept_ignore = kept_ignore return self def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. 
""" parts = ['SelectorArguments('] for argument in self: parts.append( f" {argument.pretty_repr(model)}" ) parts.append(')') return '\n'.join(parts) @property def ignore(self): """Get the list of ignored inputs""" ignore = [argument.index for argument in self if argument.ignore] ignore.extend(self._kept_ignore) return ignore @property def kept_ignore(self): """The arguments to persist in ignoring""" return self._kept_ignore @classmethod def validate(cls, model, arguments, kept_ignore: List=None): """ Construct a valid Selector description for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. arguments : The individual argument informations kept_ignore : Arguments to persist as ignored """ inputs = [] for argument in arguments: _input = _SelectorArgument.validate(model, *argument) if _input.index in [this.index for this in inputs]: raise ValueError(f"Input: '{get_name(model, _input.index)}' has been repeated.") inputs.append(_input) if len(inputs) == 0: raise ValueError("There must be at least one selector argument.") if isinstance(arguments, _SelectorArguments): if kept_ignore is None: kept_ignore = [] kept_ignore.extend(arguments.kept_ignore) return cls(tuple(inputs), kept_ignore) def get_selector(self, *inputs): """ Get the selector corresponding to these inputs Parameters ---------- *inputs : All the processed model evaluation inputs. """ return tuple([argument.get_selector(*inputs) for argument in self]) def is_selector(self, _selector): """ Determine if this is a reasonable selector Parameters ---------- _selector : tuple The selector to check """ return isinstance(_selector, tuple) and len(_selector) == len(self) def get_fixed_values(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. values : dict Dictionary of fixed inputs. """ return tuple([argument.get_fixed_value(model, values) for argument in self]) def is_argument(self, model, argument) -> bool: """ Determine if passed argument is one of the selector arguments Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which evaluation input is being used """ for selector_arg in self: if selector_arg.is_argument(model, argument): return True else: return False def selector_index(self, model, argument): """ Get the index of the argument passed in the selector tuples Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ for index, selector_arg in enumerate(self): if selector_arg.is_argument(model, argument): return index else: raise ValueError(f"{argument} does not correspond to any selector argument.") def reduce(self, model, argument): """ Reduce the selector arguments by the argument given Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. 
argument : int or str A representation of which argument is being used """ arguments = list(self) kept_ignore = [arguments.pop(self.selector_index(model, argument)).index] kept_ignore.extend(self._kept_ignore) return _SelectorArguments.validate(model, tuple(arguments), kept_ignore) def add_ignore(self, model, argument): """ Add argument to the kept_ignore list Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ if self.is_argument(model, argument): raise ValueError(f"{argument}: is a selector argument and cannot be ignored.") kept_ignore = [get_index(model, argument)] return _SelectorArguments.validate(model, self, kept_ignore) def named_tuple(self, model): """ Get a tuple of selector argument tuples using input names Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. """ return tuple([selector_arg.named_tuple(model) for selector_arg in self]) class CompoundBoundingBox(_BoundingDomain): """ A model's compound bounding box Parameters ---------- bounding_boxes : dict A dictionary containing all the ModelBoundingBoxes that are possible keys -> _selector (extracted from model inputs) values -> ModelBoundingBox model : `~astropy.modeling.Model` The Model this compound bounding_box is for. selector_args : _SelectorArguments A description of how to extract the selectors from model inputs. create_selector : optional A method which takes in the selector and the model to return a valid bounding corresponding to that selector. This can be used to construct new bounding_boxes for previously undefined selectors. These new boxes are then stored for future lookups. order : optional, str The ordering that is assumed for the tuple representation of the bounding_boxes. 
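    Examples
    --------
    A minimal sketch with arbitrary interval values, selecting the member
    bounding box on a ``slit_id`` input::

        >>> from astropy.modeling.models import Shift, Scale, Identity
        >>> model = Shift(1) & Scale(2) & Identity(1)
        >>> model.inputs = ('x', 'y', 'slit_id')
        >>> bbox = CompoundBoundingBox.validate(
        ...     model,
        ...     {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
        ...      (1,): ((-0.5, 3047.5), (-0.5, 4047.5))},
        ...     selector_args=[('slit_id', True)], order='F')
        >>> (0,) in bbox
        True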
""" def __init__(self, bounding_boxes: Dict[Any, ModelBoundingBox], model, selector_args: _SelectorArguments, create_selector: Callable = None, ignored: List[int] = None, order: str = 'C'): super().__init__(model, ignored, order) self._create_selector = create_selector self._selector_args = _SelectorArguments.validate(model, selector_args) self._bounding_boxes = {} self._validate(bounding_boxes) def copy(self): bounding_boxes = {selector: bbox.copy(self.selector_args.ignore) for selector, bbox in self._bounding_boxes.items()} return CompoundBoundingBox(bounding_boxes, self._model, selector_args=self._selector_args, create_selector=copy.deepcopy(self._create_selector), order=self._order) def __repr__(self): parts = ['CompoundBoundingBox(', ' bounding_boxes={'] # bounding_boxes for _selector, bbox in self._bounding_boxes.items(): bbox_repr = bbox.__repr__().split('\n') parts.append(f" {_selector} = {bbox_repr.pop(0)}") for part in bbox_repr: parts.append(f" {part}") parts.append(' }') # selector_args selector_args_repr = self.selector_args.pretty_repr(self._model).split('\n') parts.append(f" selector_args = {selector_args_repr.pop(0)}") for part in selector_args_repr: parts.append(f" {part}") parts.append(')') return '\n'.join(parts) @property def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]: return self._bounding_boxes @property def selector_args(self) -> _SelectorArguments: return self._selector_args @selector_args.setter def selector_args(self, value): self._selector_args = _SelectorArguments.validate(self._model, value) warnings.warn("Overriding selector_args may cause problems you should re-validate " "the compound bounding box before use!", RuntimeWarning) @property def named_selector_tuple(self) -> tuple: return self._selector_args.named_tuple(self._model) @property def create_selector(self): return self._create_selector @staticmethod def _get_selector_key(key): if isiterable(key): return tuple(key) else: return (key,) def __setitem__(self, key, value): _selector = self._get_selector_key(key) if not self.selector_args.is_selector(_selector): raise ValueError(f"{_selector} is not a selector!") ignored = self.selector_args.ignore + self.ignored self._bounding_boxes[_selector] = ModelBoundingBox.validate(self._model, value, ignored, order=self._order) def _validate(self, bounding_boxes: dict): for _selector, bounding_box in bounding_boxes.items(): self[_selector] = bounding_box def __eq__(self, value): if isinstance(value, CompoundBoundingBox): return (self.bounding_boxes == value.bounding_boxes) and \ (self.selector_args == value.selector_args) and \ (self.create_selector == value.create_selector) else: return False @classmethod def validate(cls, model, bounding_box: dict, selector_args=None, create_selector=None, ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwarg): """ Construct a valid compound bounding box for a model. 
Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict Dictionary of possible bounding_box respresentations selector_args : optional Description of the selector arguments create_selector : optional, callable Method for generating new selectors order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, CompoundBoundingBox): if selector_args is None: selector_args = bounding_box.selector_args if create_selector is None: create_selector = bounding_box.create_selector order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.bounding_boxes if selector_args is None: raise ValueError("Selector arguments must be provided (can be passed as part of bounding_box argument)!") return cls(bounding_box, model, selector_args, create_selector=create_selector, ignored=ignored, order=order) def __contains__(self, key): return key in self._bounding_boxes def _create_bounding_box(self, _selector): self[_selector] = self._create_selector(_selector, model=self._model) return self[_selector] def __getitem__(self, key): _selector = self._get_selector_key(key) if _selector in self: return self._bounding_boxes[_selector] elif self._create_selector is not None: return self._create_bounding_box(_selector) else: raise RuntimeError(f"No bounding box is defined for selector: {_selector}.") def _select_bounding_box(self, inputs) -> ModelBoundingBox: _selector = self.selector_args.get_selector(*inputs) return self[_selector] def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ bounding_box = self._select_bounding_box(inputs) return bounding_box.prepare_inputs(input_shape, inputs) def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]: selector_index = self.selector_args.selector_index(self._model, argument) matching = {} for selector_key, bbox in self._bounding_boxes.items(): if selector_key[selector_index] == value: new_selector_key = list(selector_key) new_selector_key.pop(selector_index) if bbox.has_interval(argument): new_bbox = bbox.fix_inputs(self._model, {argument: value}, _keep_ignored=True) else: new_bbox = bbox.copy() matching[tuple(new_selector_key)] = new_bbox if len(matching) == 0: raise ValueError(f"Attempting to fix input {argument}, but there are no " f"bounding boxes for argument value {value}.") return matching def _fix_input_selector_arg(self, argument, value): matching_bounding_boxes = self._matching_bounding_boxes(argument, value) if len(self.selector_args) == 1: return matching_bounding_boxes[()] else: return CompoundBoundingBox(matching_bounding_boxes, self._model, self.selector_args.reduce(self._model, argument)) def _fix_input_bbox_arg(self, argument, value): bounding_boxes = {} for selector_key, bbox in self._bounding_boxes.items(): bounding_boxes[selector_key] = bbox.fix_inputs(self._model, {argument: value}, _keep_ignored=True) return CompoundBoundingBox(bounding_boxes, self._model, 
self.selector_args.add_ignore(self._model, argument)) def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ fixed_input_keys = list(fixed_inputs.keys()) argument = fixed_input_keys.pop() value = fixed_inputs[argument] if self.selector_args.is_argument(self._model, argument): bbox = self._fix_input_selector_arg(argument, value) else: bbox = self._fix_input_bbox_arg(argument, value) if len(fixed_input_keys) > 0: new_fixed_inputs = fixed_inputs.copy() del new_fixed_inputs[argument] bbox = bbox.fix_inputs(model, new_fixed_inputs) if isinstance(bbox, CompoundBoundingBox): selector_args = bbox.named_selector_tuple bbox_dict = bbox elif isinstance(bbox, ModelBoundingBox): selector_args = None bbox_dict = bbox.named_intervals return bbox.__class__.validate(model, bbox_dict, order=bbox.order, selector_args=selector_args)
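
# A short illustrative sketch of the evaluation flow implemented above; the
# model and array values are arbitrary.  ``prepare_inputs`` masks positions
# that fall outside the intervals so that only in-bounds values reach
# ``evaluate``::
#
#     >>> import numpy as np
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> bbox = ModelBoundingBox.validate(Gaussian1D(), (-1, 1))
#     >>> valid, idx, all_out = bbox.prepare_inputs((3,), [np.array([-2., 0., 2.])])
#     >>> valid, all_out
#     ((array([0.]),), False)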
0b772a3daa920beaec426dd65dd664b09639dc83bb94a3bced3831b617057a67
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Mathematical models.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import numpy as np from astropy import units as u from astropy.units import Quantity, UnitsError from .core import Fittable1DModel, Fittable2DModel from .parameters import InputParameterError, Parameter from .utils import ellipse_extent __all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D', 'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D', 'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D', 'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale', 'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Cosine1D', 'Tangent1D', 'ArcSine1D', 'ArcCosine1D', 'ArcTangent1D', 'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D', 'KingProjectedAnalytic1D', 'Exponential1D', 'Logarithmic1D'] TWOPI = 2 * np.pi FLOAT_EPSILON = float(np.finfo(np.float32).tiny) # Note that we define this here rather than using the value defined in # astropy.stats to avoid importing astropy.stats every time astropy.modeling # is loaded. GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0)) class Gaussian1D(Fittable1DModel): """ One dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian - for a normalized profile (integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi)) mean : float or `~astropy.units.Quantity`. Mean of the Gaussian. stddev : float or `~astropy.units.Quantity`. Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)). Notes ----- Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}} Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that 'mean' is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian1D plt.figure() s1 = Gaussian1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() See Also -------- Gaussian2D, Box1D, Moffat1D, Lorentz1D """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian") mean = Parameter(default=0, description="Position of peak (Gaussian)") # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. 
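    # FLOAT_EPSILON (the smallest positive normal float32) keeps fitters from
    # collapsing stddev to zero while imposing no practical upper limit.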
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian") def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` Parameters ---------- factor : float The multiple of `stddev` used to define the limits. The default is 5.5, corresponding to a relative error < 1e-7. Examples -------- >>> from astropy.modeling.models import Gaussian1D >>> model = Gaussian1D(mean=0, stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-11.0, upper=11.0) } model=Gaussian1D(inputs=('x',)) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor, like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-4.0, upper=4.0) } model=Gaussian1D(inputs=('x',)) order='C' ) """ x0 = self.mean dx = factor * self.stddev return (x0 - dx, x0 + dx) @property def fwhm(self): """Gaussian full width at half maximum.""" return self.stddev * GAUSSIAN_SIGMA_TO_FWHM @staticmethod def evaluate(x, amplitude, mean, stddev): """ Gaussian1D model function. """ return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2) @staticmethod def fit_deriv(x, amplitude, mean, stddev): """ Gaussian1D model function derivatives. """ d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2) d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2 d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3 return [d_amplitude, d_mean, d_stddev] @property def input_units(self): if self.mean.unit is None: return None return {self.inputs[0]: self.mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mean': inputs_unit[self.inputs[0]], 'stddev': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Gaussian2D(Fittable2DModel): r""" Two dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian. x_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in x. y_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in y. x_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in x before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). y_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in y before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). theta : float or `~astropy.units.Quantity`, optional. The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise. Must be `None` if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, `None` means the default value (0). cov_matrix : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults. Notes ----- Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. 
math:: f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right) \left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}} Using the following definitions: .. math:: a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} - \frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right) c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) If using a ``cov_matrix``, the model is of the form: .. math:: f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)} where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`, and :math:`\Sigma` is the covariance matrix: .. math:: \Sigma = \left(\begin{array}{ccc} \sigma_x^2 & \rho \sigma_x \sigma_y \\ \rho \sigma_x \sigma_y & \sigma_y^2 \end{array}\right) :math:`\rho` is the correlation between ``x`` and ``y``, which should be between -1 and +1. Positive correlation corresponds to a ``theta`` in the range 0 to 90 degrees. Negative correlation corresponds to a ``theta`` in the range of 0 to -90 degrees. See [1]_ for more details about the 2D Gaussian function. See Also -------- Gaussian1D, Box2D, Moffat2D References ---------- .. [1] https://en.wikipedia.org/wiki/Gaussian_function """ amplitude = Parameter(default=1, description="Amplitude of the Gaussian") x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian") y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian") x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)") y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)") theta = Parameter(default=0.0, description=("Rotation angle either as a " "float (in radians) or a " "|Quantity| angle (optional)")) def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default, y_mean=y_mean.default, x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, **kwargs): if cov_matrix is None: if x_stddev is None: x_stddev = self.__class__.x_stddev.default if y_stddev is None: y_stddev = self.__class__.y_stddev.default if theta is None: theta = self.__class__.theta.default else: if x_stddev is not None or y_stddev is not None or theta is not None: raise InputParameterError("Cannot specify both cov_matrix and " "x/y_stddev/theta") # Compute principle coordinate system transformation cov_matrix = np.array(cov_matrix) if cov_matrix.shape != (2, 2): raise ValueError("Covariance matrix must be 2x2") eig_vals, eig_vecs = np.linalg.eig(cov_matrix) x_stddev, y_stddev = np.sqrt(eig_vals) y_vec = eig_vecs[:, 0] theta = np.arctan2(y_vec[1], y_vec[0]) # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. # TODO: Investigate why setting this in Parameter above causes # convolution tests to hang. 
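        # The two ``setdefault`` calls below leave any caller-supplied bounds
        # untouched and otherwise constrain each stddev to (FLOAT_EPSILON, None).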
kwargs.setdefault('bounds', {}) kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None)) kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None)) super().__init__( amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs) @property def x_fwhm(self): """Gaussian full width at half maximum in X.""" return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM @property def y_fwhm(self): """Gaussian full width at half maximum in Y.""" return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits in each dimension, ``((y_low, y_high), (x_low, x_high))`` The default offset from the mean is 5.5-sigma, corresponding to a relative error < 1e-7. The limits are adjusted for rotation. Parameters ---------- factor : float, optional The multiple of `x_stddev` and `y_stddev` used to define the limits. The default is 5.5. Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-5.5, upper=5.5) y: Interval(lower=-11.0, upper=11.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-2.0, upper=2.0) y: Interval(lower=-4.0, upper=4.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) """ a = factor * self.x_stddev b = factor * self.y_stddev if self.theta.quantity is None: theta = self.theta.value else: theta = self.theta.quantity dx, dy = ellipse_extent(a, b, theta) return ((self.y_mean - dy, self.y_mean + dy), (self.x_mean - dx, self.x_mean + dx)) @staticmethod def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function""" cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 sin2t = np.sin(2. * theta) xstd2 = x_stddev ** 2 ystd2 = y_stddev ** 2 xdiff = x - x_mean ydiff = y - y_mean a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) + (c * ydiff ** 2))) @staticmethod def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function derivative with respect to parameters""" cost = np.cos(theta) sint = np.sin(theta) cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 cos2t = np.cos(2. * theta) sin2t = np.sin(2. * theta) xstd2 = x_stddev ** 2 ystd2 = y_stddev ** 2 xstd3 = x_stddev ** 3 ystd3 = y_stddev ** 3 xdiff = x - x_mean ydiff = y - y_mean xdiff2 = xdiff ** 2 ydiff2 = ydiff ** 2 a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2))) da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2))) da_dx_stddev = -cost2 / xstd3 da_dy_stddev = -sint2 / ystd3 db_dtheta = (cos2t / xstd2) - (cos2t / ystd2) db_dx_stddev = -sin2t / xstd3 db_dy_stddev = sin2t / ystd3 dc_dtheta = -da_dtheta dc_dx_stddev = -sint2 / xstd3 dc_dy_stddev = -cost2 / ystd3 dg_dA = g / amplitude dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff)) dg_dy_mean = g * ((b * xdiff) + (2. 
* c * ydiff)) dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 + db_dx_stddev * xdiff * ydiff + dc_dx_stddev * ydiff2)) dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 + db_dy_stddev * xdiff * ydiff + dc_dy_stddev * ydiff2)) dg_dtheta = g * (-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)) return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta] @property def input_units(self): if self.x_mean.unit is None and self.y_mean.unit is None: return None return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_mean': inputs_unit[self.inputs[0]], 'y_mean': inputs_unit[self.inputs[0]], 'x_stddev': inputs_unit[self.inputs[0]], 'y_stddev': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class Shift(Fittable1DModel): """ Shift a coordinate. Parameters ---------- offset : float Offset to add to a coordinate. """ offset = Parameter(default=0, description="Offset to add to a model") linear = True _has_inverse_bounding_box = True @property def input_units(self): if self.offset.unit is None: return None return {self.inputs[0]: self.offset.unit} @property def inverse(self): """One dimensional inverse Shift model function""" inv = self.copy() inv.offset *= -1 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box) return inv @staticmethod def evaluate(x, offset): """One dimensional Shift model function""" return x + offset @staticmethod def sum_of_implicit_terms(x): """Evaluate the implicit term (x) of one dimensional Shift model""" return x @staticmethod def fit_deriv(x, *params): """One dimensional Shift model derivative with respect to parameter""" d_offset = np.ones_like(x) return [d_offset] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'offset': outputs_unit[self.outputs[0]]} class Scale(Fittable1DModel): """ Multiply a model by a dimensionless factor. Parameters ---------- factor : float Factor by which to scale a coordinate. Notes ----- If ``factor`` is a `~astropy.units.Quantity` then the units will be stripped before the scaling operation. 
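    Examples
    --------
    A sketch of the unit handling described above (the values are arbitrary);
    the factor's unit is dropped, so the input's unit simply carries through::

        >>> import astropy.units as u
        >>> from astropy.modeling.models import Scale
        >>> print(Scale(2 * u.m)(3 * u.m))
        6.0 m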
""" factor = Parameter(default=1, description="Factor by which to scale a model") linear = True fittable = True _input_units_strict = True _input_units_allow_dimensionless = True _has_inverse_bounding_box = True @property def input_units(self): if self.factor.unit is None: return None return {self.inputs[0]: self.factor.unit} @property def inverse(self): """One dimensional inverse Scale model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()) return inv @staticmethod def evaluate(x, factor): """One dimensional Scale model function""" if isinstance(factor, u.Quantity): factor = factor.value return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional Scale model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'factor': outputs_unit[self.outputs[0]]} class Multiply(Fittable1DModel): """ Multiply a model by a quantity or number. Parameters ---------- factor : float Factor by which to multiply a coordinate. """ factor = Parameter(default=1, description="Factor by which to multiply a model") linear = True fittable = True _has_inverse_bounding_box = True @property def inverse(self): """One dimensional inverse multiply model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()) return inv @staticmethod def evaluate(x, factor): """One dimensional multiply model function""" return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional multiply model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'factor': outputs_unit[self.outputs[0]]} class RedshiftScaleFactor(Fittable1DModel): """ One dimensional redshift scale factor model. Parameters ---------- z : float Redshift value. Notes ----- Model formula: .. math:: f(x) = x (1 + z) """ z = Parameter(description='Redshift', default=0) _has_inverse_bounding_box = True @staticmethod def evaluate(x, z): """One dimensional RedshiftScaleFactor model function""" return (1 + z) * x @staticmethod def fit_deriv(x, z): """One dimensional RedshiftScaleFactor model derivative""" d_z = x return [d_z] @property def inverse(self): """Inverse RedshiftScaleFactor model""" inv = self.copy() inv.z = 1.0 / (1.0 + self.z) - 1.0 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()) return inv class Sersic1D(Fittable1DModel): r""" One dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. See Also -------- Gaussian1D, Moffat1D, Lorentz1D Notes ----- Model formula: .. math:: I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (b_n,2n) Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic1D import matplotlib.pyplot as plt plt.figure() plt.subplot(111, xscale='log', yscale='log') s1 = Sersic1D(amplitude=1, r_eff=5) r=np.arange(0, 100, .01) for n in range(1, 10): s1.n = n plt.plot(r, s1(r), color=str(float(n) / 15)) plt.axis([1e-1, 30, 1e-2, 1e3]) plt.xlabel('log Radius') plt.ylabel('log Surface Brightness') plt.text(.25, 1.5, 'n=1') plt.text(.25, 300, 'n=10') plt.xticks([]) plt.yticks([]) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") _gammaincinv = None @classmethod def evaluate(cls, r, amplitude, r_eff, n): """One dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv return (amplitude * np.exp( -cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1))) @property def input_units(self): if self.r_eff.unit is None: return None return {self.inputs[0]: self.r_eff.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'r_eff': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class _Trigonometric1D(Fittable1DModel): """ Base class for one dimensional trigonometric and inverse trigonometric models Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase """ amplitude = Parameter(default=1, description="Oscillation amplitude") frequency = Parameter(default=1, description="Oscillation frequency") phase = Parameter(default=0, description="Oscillation phase") @property def input_units(self): if self.frequency.unit is None: return None return {self.inputs[0]: 1. / self.frequency.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'frequency': inputs_unit[self.inputs[0]] ** -1, 'amplitude': outputs_unit[self.outputs[0]]} class Sine1D(_Trigonometric1D): """ One dimensional Sine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Sine1D plt.figure() s1 = Sine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Sine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
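        # For example, ``x`` in ``u.s`` with ``frequency`` in ``1 / u.s`` gives
        # a dimensionless ``argument``, and ``.value`` recovers the plain array.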
argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.sin(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Sine model derivative""" d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase) d_frequency = (TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)) d_phase = (TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Sine""" return ArcSine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Cosine1D(_Trigonometric1D): """ One dimensional Cosine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Cosine1D plt.figure() s1 = Cosine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Cosine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.cos(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Cosine model derivative""" d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase) d_frequency = - (TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)) d_phase = - (TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Cosine""" return ArcCosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Tangent1D(_Trigonometric1D): """ One dimensional Tangent model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- Sine1D, Cosine1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p) Note that the tangent function is undefined for inputs of the form pi/2 + n*pi for all integers n. Thus thus the default bounding box has been restricted to: .. math:: [(-1/4 - p)/f, (1/4 - p)/f] which is the smallest interval for the tangent function to be continuous on. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Tangent1D plt.figure() s1 = Tangent1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Tangent model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.tan(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Tangent model derivative""" sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase))**2 d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase) d_frequency = TWOPI * x * amplitude * sec d_phase = TWOPI * amplitude * sec return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Tangent""" return ArcTangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ bbox = [(-1/4 - self.phase) / self.frequency, (1/4 - self.phase) / self.frequency] if self.frequency.unit is not None: bbox = bbox / self.frequency.unit return bbox class _InverseTrigonometric1D(_Trigonometric1D): """ Base class for one dimensional inverse trigonometric models """ @property def input_units(self): if self.amplitude.unit is None: return None return {self.inputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'frequency': outputs_unit[self.outputs[0]] ** -1, 'amplitude': inputs_unit[self.inputs[0]]} class ArcSine1D(_InverseTrigonometric1D): """ One dimensional ArcSine model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Sine frequency : float Oscillation frequency for corresponding Sine phase : float Oscillation phase for corresponding Sine See Also -------- Sine1D, ArcCosine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f The arcsin function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcSine1D plt.figure() s1 = ArcSine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcSine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. 
However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_sine = np.arcsin(argument) / TWOPI return (arc_sine - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcSine model derivative""" d_amplitude = - x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2)) d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcSine""" return Sine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class ArcCosine1D(_InverseTrigonometric1D): """ One dimensional ArcCosine returning values between 0 and pi only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Cosine frequency : float Oscillation frequency for corresponding Cosine phase : float Oscillation phase for corresponding Cosine See Also -------- Cosine1D, ArcSine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f The arccos function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcCosine1D plt.figure() s1 = ArcCosine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, 0, np.pi]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcCosine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arccos(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcCosine model derivative""" d_amplitude = x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2)) d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcCosine""" return Cosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class ArcTangent1D(_InverseTrigonometric1D): """ One dimensional ArcTangent model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Tangent frequency : float Oscillation frequency for corresponding Tangent phase : float Oscillation phase for corresponding Tangent See Also -------- Tangent1D, ArcSine1D, ArcCosine1D Notes ----- Model formula: .. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcTangent1D plt.figure() s1 = ArcTangent1D(amplitude=1, frequency=.25) r=np.arange(-10, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-10, 10, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcTangent model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arctan(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcTangent model derivative""" d_amplitude = - x / (TWOPI * frequency * amplitude**2 * (1 + (x / amplitude)**2)) d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2 d_phase = - 1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of ArcTangent""" return Tangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase) class Linear1D(Fittable1DModel): """ One dimensional Line model. Parameters ---------- slope : float Slope of the straight line intercept : float Intercept of the straight line See Also -------- Const1D Notes ----- Model formula: .. 
math:: f(x) = a x + b """ slope = Parameter(default=1, description="Slope of the straight line") intercept = Parameter(default=0, description="Intercept of the straight line") linear = True @staticmethod def evaluate(x, slope, intercept): """One dimensional Line model function""" return slope * x + intercept @staticmethod def fit_deriv(x, *params): """One dimensional Line model derivative with respect to parameters""" d_slope = x d_intercept = np.ones_like(x) return [d_slope, d_intercept] @property def inverse(self): new_slope = self.slope ** -1 new_intercept = -self.intercept / self.slope return self.__class__(slope=new_slope, intercept=new_intercept) @property def input_units(self): if self.intercept.unit is None and self.slope.unit is None: return None return {self.inputs[0]: self.intercept.unit / self.slope.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'intercept': outputs_unit[self.outputs[0]], 'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]} class Planar2D(Fittable2DModel): """ Two dimensional Plane model. Parameters ---------- slope_x : float Slope of the plane in X slope_y : float Slope of the plane in Y intercept : float Z-intercept of the plane Notes ----- Model formula: .. math:: f(x, y) = a x + b y + c """ slope_x = Parameter(default=1, description="Slope of the plane in X") slope_y = Parameter(default=1, description="Slope of the plane in Y") intercept = Parameter(default=0, description="Z-intercept of the plane") linear = True @staticmethod def evaluate(x, y, slope_x, slope_y, intercept): """Two dimensional Plane model function""" return slope_x * x + slope_y * y + intercept @staticmethod def fit_deriv(x, y, *params): """Two dimensional Plane model derivative with respect to parameters""" d_slope_x = x d_slope_y = y d_intercept = np.ones_like(x) return [d_slope_x, d_slope_y, d_intercept] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'intercept': outputs_unit['z'], 'slope_x': outputs_unit['z'] / inputs_unit['x'], 'slope_y': outputs_unit['z'] / inputs_unit['y']} class Lorentz1D(Fittable1DModel): """ One dimensional Lorentzian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Peak value - for a normalized profile (integrating to 1), set amplitude = 2 / (np.pi * fwhm) x_0 : float or `~astropy.units.Quantity`. Position of the peak fwhm : float or `~astropy.units.Quantity`. Full width at half maximum (FWHM) See Also -------- Gaussian1D, Box1D, RickerWavelet1D Notes ----- Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}} where :math:`\\gamma` is half of given FWHM. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Lorentz1D plt.figure() s1 = Lorentz1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Peak value") x_0 = Parameter(default=0, description="Position of the peak") fwhm = Parameter(default=1, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model function""" return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.) 
** 2)) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model derivative with respect to parameters""" d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2) d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm ** 2 + (x - x_0) ** 2)) d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] def bounding_box(self, factor=25): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. Default is chosen to include most (99%) of the area under the curve, while still showing the central feature of interest. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'fwhm': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Voigt1D(Fittable1DModel): """ One dimensional model for the Voigt profile. Parameters ---------- x_0 : float or `~astropy.units.Quantity` Position of the peak amplitude_L : float or `~astropy.units.Quantity`. The Lorentzian amplitude (peak of the associated Lorentz function) - for a normalized profile (integrating to 1), set amplitude_L = 2 / (np.pi * fwhm_L) fwhm_L : float or `~astropy.units.Quantity` The Lorentzian full width at half maximum fwhm_G : float or `~astropy.units.Quantity`. The Gaussian full width at half maximum method : str, optional Algorithm for computing the complex error function; one of 'Humlicek2' (default, fast and generally more accurate than ``rtol=3.e-5``) or 'Scipy', alternatively 'wofz' (requires ``scipy``, almost as fast and reference in accuracy). See Also -------- Gaussian1D, Lorentz1D Notes ----- Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided consistently with compatible units or as unitless numbers. Voigt function is calculated as real part of the complex error function computed from either Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or `~scipy.special.wofz` (implementing 'Faddeeva.cc'). Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Voigt1D import matplotlib.pyplot as plt plt.figure() x = np.arange(0, 10, 0.01) v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) plt.plot(x, v1(x)) plt.show() """ x_0 = Parameter(default=0, description="Position of the peak") amplitude_L = Parameter(default=1, # noqa: N815 description="The Lorentzian amplitude") fwhm_L = Parameter(default=2/np.pi, # noqa: N815 description="The Lorentzian full width at half maximum") fwhm_G = Parameter(default=np.log(2), # noqa: N815 description="The Gaussian full width at half maximum") sqrt_pi = np.sqrt(np.pi) sqrt_ln2 = np.sqrt(np.log(2)) sqrt_ln2pi = np.sqrt(np.log(2) * np.pi) _last_z = np.zeros(1, dtype=complex) _last_w = np.zeros(1, dtype=float) _faddeeva = None def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803 fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803 **kwargs): if str(method).lower() in ('wofz', 'scipy'): from scipy.special import wofz self._faddeeva = wofz elif str(method).lower() == 'humlicek2': self._faddeeva = self._hum2zpf16c else: raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.') self.method = self._faddeeva.__name__ super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs) def _wrap_wofz(self, z): """Call complex error (Faddeeva) function w(z) implemented by algorithm `method`; cache results for consecutive calls from `evaluate`, `fit_deriv`.""" if (z.shape == self._last_z.shape and np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)): return self._last_w self._last_w = self._faddeeva(z) self._last_z = z return self._last_w def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803 """One dimensional Voigt function scaled to Lorentz peak amplitude.""" z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G # The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ; # for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803 """Derivative of the one dimensional Voigt function with respect to parameters.""" s = self.sqrt_ln2 / fwhm_G z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s # V * constant from McLean implementation (== their Voigt function) w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi # Schreier (2018) Eq. 
6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L) dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L return [-dwdz.real * 2 * s, w.real / amplitude_L, w.real / fwhm_L - dwdz.imag * s, (-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'fwhm_L': inputs_unit[self.inputs[0]], 'fwhm_G': inputs_unit[self.inputs[0]], 'amplitude_L': outputs_unit[self.outputs[0]]} @staticmethod def _hum2zpf16c(z, s=10.0): """Complex error function w(z) for z = x + iy combining Humlicek's rational approximations: |x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II; else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35 Version using a mask and np.place; single complex argument version of Franz Schreier's cpfX.hum2zpf16m. Originally licensed under a 3-clause BSD style license - see https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py """ # Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35 AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806 -206562.80451354137, 281369.1590631087j, +183092.74968253175, -184787.96830696272j, -66155.39578477248, 57778.05827983565j, +11682.770904216826, -9442.402767960672j, -1052.8438624933142, 814.0996198624186j, +45.94499030751872, -34.59751573708725j, -0.7616559377907136, 0.5641895835476449j]) # 1j/sqrt(pi) to the 12. digit bb = np.array([+7918.06640624997, 0.0, -126689.0625, 0.0, +295607.8125, 0.0, -236486.25, 0.0, +84459.375, 0.0, -15015.0, 0.0, +1365.0, 0.0, -60.0, 0.0, +1.0]) sqrt_piinv = 1.0 / np.sqrt(np.pi) zz = z * z w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0)) if np.any(z.imag < s): mask = abs(z.real) + z.imag < s # returns true for interior points # returns small complex array covering only the interior region Z = z[np.where(mask)] + 1.35j ZZ = Z * Z numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z + AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z + AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0]) denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ + bb[4])*ZZ + bb[2])*ZZ + bb[0] np.place(w, mask, numer / denom) return w class Const1D(Fittable1DModel): """ One dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const2D Notes ----- Model formula: .. math:: f(x) = A Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Const1D plt.figure() s1 = Const1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Value of the constant function") linear = True @staticmethod def evaluate(x, amplitude): """One dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(x, subok=False) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False) return x @staticmethod def fit_deriv(x, amplitude): """One dimensional Constant model derivative with respect to parameters""" d_amplitude = np.ones_like(x) return [d_amplitude] @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'amplitude': outputs_unit[self.outputs[0]]} class Const2D(Fittable2DModel): """ Two dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const1D Notes ----- Model formula: .. math:: f(x, y) = A """ amplitude = Parameter(default=1, description="Value of the constant function") linear = True @staticmethod def evaluate(x, y, amplitude): """Two dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(x, subok=False) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False) return x @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'amplitude': outputs_unit[self.outputs[0]]} class Ellipse2D(Fittable2DModel): """ A 2D Ellipse model. Parameters ---------- amplitude : float Value of the ellipse. x_0 : float x position of the center of the disk. y_0 : float y position of the center of the disk. a : float The length of the semimajor axis. b : float The length of the semiminor axis. theta : float The rotation angle in radians of the semimajor axis. The rotation angle increases counterclockwise from the positive x axis. See Also -------- Disk2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} \\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos \\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 + \\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0) \\cos \\theta}{b}\\right]^2 \\leq 1 \\\\ 0 & : \\mathrm{otherwise} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Ellipse2D from astropy.coordinates import Angle import matplotlib.pyplot as plt import matplotlib.patches as mpatches x0, y0 = 25, 25 a, b = 20, 10 theta = Angle(30, 'deg') e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b, theta=theta.radian) y, x = np.mgrid[0:50, 0:50] fig, ax = plt.subplots(1, 1) ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r') e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red', facecolor='none') ax.add_patch(e2) plt.show() """ amplitude = Parameter(default=1, description="Value of the ellipse") x_0 = Parameter(default=0, description="X position of the center of the disk.") y_0 = Parameter(default=0, description="Y position of the center of the disk.") a = Parameter(default=1, description="The length of the semimajor axis") b = Parameter(default=1, description="The length of the semiminor axis") theta = Parameter(default=0, description="The rotation angle in radians of the semimajor axis (Positive - counterclockwise)") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, a, b, theta): """Two dimensional Ellipse model function.""" xx = x - x_0 yy = y - y_0 cost = np.cos(theta) sint = np.sin(theta) numerator1 = (xx * cost) + (yy * sint) numerator2 = -(xx * sint) + (yy * cost) in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.) result = np.select([in_ellipse], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ a = self.a b = self.b theta = self.theta.value dx, dy = ellipse_extent(a, b, theta) return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'a': inputs_unit[self.inputs[0]], 'b': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class Disk2D(Fittable2DModel): """ Two dimensional radial symmetric Disk model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk R_0 : float Radius of the disk See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r \\leq R_0 \\\\ 0 & : r > R_0 \\end{array} \\right. 
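Examples
--------
A minimal sketch of evaluating the model on a pixel grid (the
parameter values here are illustrative, not defaults):

.. plot::
    :include-source:

    import numpy as np
    import matplotlib.pyplot as plt
    from astropy.modeling.models import Disk2D

    disk = Disk2D(amplitude=1., x_0=25, y_0=25, R_0=10)
    y, x = np.mgrid[0:50, 0:50]
    plt.imshow(disk(x, y), origin='lower', interpolation='none')
    plt.show()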
""" amplitude = Parameter(default=1, description="Value of disk function") x_0 = Parameter(default=0, description="X position of center of the disk") y_0 = Parameter(default=0, description="Y position of center of the disk") R_0 = Parameter(default=1, description="Radius of the disk") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0): """Two dimensional Disk model function""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 result = np.select([rr <= R_0 ** 2], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ return ((self.y_0 - self.R_0, self.y_0 + self.R_0), (self.x_0 - self.R_0, self.x_0 + self.R_0)) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'R_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Ring2D(Fittable2DModel): """ Two dimensional radial symmetric Ring model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk r_in : float Inner radius of the ring width : float Width of the ring. r_out : float Outer Radius of the ring. Can be specified instead of width. See Also -------- Disk2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r_{in} \\leq r \\leq r_{out} \\\\ 0 & : \\text{else} \\end{array} \\right. Where :math:`r_{out} = r_{in} + r_{width}`. 
""" amplitude = Parameter(default=1, description="Value of the disk function") x_0 = Parameter(default=0, description="X position of center of disc") y_0 = Parameter(default=0, description="Y position of center of disc") r_in = Parameter(default=1, description="Inner radius of the ring") width = Parameter(default=1, description="Width of the ring") def __init__(self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=None, width=None, r_out=None, **kwargs): if (r_in is None) and (r_out is None) and (width is None): r_in = self.r_in.default width = self.width.default elif (r_in is not None) and (r_out is None) and (width is None): width = self.width.default elif (r_in is None) and (r_out is not None) and (width is None): r_in = self.r_in.default width = r_out - r_in elif (r_in is None) and (r_out is None) and (width is not None): r_in = self.r_in.default elif (r_in is not None) and (r_out is not None) and (width is None): width = r_out - r_in elif (r_in is None) and (r_out is not None) and (width is not None): r_in = r_out - width elif (r_in is not None) and (r_out is not None) and (width is not None): if np.any(width != (r_out - r_in)): raise InputParameterError("Width must be r_out - r_in") if np.any(r_in < 0) or np.any(width < 0): raise InputParameterError(f"{r_in=} and {width=} must both be >=0") super().__init__( amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, r_in, width): """Two dimensional Ring model function.""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2) result = np.select([r_range], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.r_in + self.width return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'r_in': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Box1D(Fittable1DModel): """ One dimensional Box model. Parameters ---------- amplitude : float Amplitude A x_0 : float Position of the center of the box function width : float Width of the box See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(x) = \\left \\{ \\begin{array}{ll} A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\ 0 & : \\text{else} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Box1D plt.figure() s1 = Box1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude A") x_0 = Parameter(default=0, description="Position of center of box function") width = Parameter(default=1, description="Width of the box") @staticmethod def evaluate(x, amplitude, x_0, width): """One dimensional Box model function""" inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.) return np.select([inside], [amplitude], 0) @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Box2D(Fittable2DModel): """ Two dimensional Box model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the center of the box function x_width : float Width in x direction of the box y_0 : float y position of the center of the box function y_width : float Width in y direction of the box See Also -------- Box1D, Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\ & y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\ 0 : & \\text{else} \\end{array} \\right. """ amplitude = Parameter(default=1, description="Amplitude") x_0 = Parameter(default=0, description="X position of the center of the box function") y_0 = Parameter(default=0, description="Y position of the center of the box function") x_width = Parameter(default=1, description="Width in x direction of the box") y_width = Parameter(default=1, description="Width in y direction of the box") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width): """Two dimensional Box model function""" x_range = np.logical_and(x >= x_0 - x_width / 2., x <= x_0 + x_width / 2.) y_range = np.logical_and(y >= y_0 - y_width / 2., y <= y_0 + y_width / 2.) result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dx = self.x_width / 2 dy = self.y_width / 2 return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[1]], 'x_width': inputs_unit[self.inputs[0]], 'y_width': inputs_unit[self.inputs[1]], 'amplitude': outputs_unit[self.outputs[0]]} class Trapezoid1D(Fittable1DModel): """ One dimensional Trapezoid model. 
Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float Center position of the trapezoid width : float Width of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid See Also -------- Box1D, Gaussian1D, Moffat1D Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Trapezoid1D plt.figure() s1 = Trapezoid1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="Center position of the trapezoid") width = Parameter(default=1, description="Width of constant part of the trapezoid") slope = Parameter(default=1, description="Slope of the tails of trapezoid") @staticmethod def evaluate(x, amplitude, x_0, width, slope): """One dimensional Trapezoid model function""" # Compute the four points where the trapezoid changes slope # x1 <= x2 <= x3 <= x4 x2 = x_0 - width / 2. x3 = x_0 + width / 2. x1 = x2 - amplitude / slope x4 = x3 + amplitude / slope # Compute model values in pieces between the change points range_a = np.logical_and(x >= x1, x < x2) range_b = np.logical_and(x >= x2, x < x3) range_c = np.logical_and(x >= x3, x < x4) val_a = slope * (x - x1) val_b = amplitude val_c = slope * (x4 - x) result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 + self.amplitude / self.slope return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'width': inputs_unit[self.inputs[0]], 'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class TrapezoidDisk2D(Fittable2DModel): """ Two dimensional circular Trapezoid model. Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float x position of the center of the trapezoid y_0 : float y position of the center of the trapezoid R_0 : float Radius of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid in x direction. 
See Also -------- Disk2D, Box2D """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="X position of the center of the trapezoid") y_0 = Parameter(default=0, description="Y position of the center of the trapezoid") R_0 = Parameter(default=1, description="Radius of constant part of trapezoid") slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0, slope): """Two dimensional Trapezoid Disk model function""" r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) range_1 = r <= R_0 range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope) val_1 = amplitude val_2 = amplitude + slope * (R_0 - r) result = np.select([range_1, range_2], [val_1, val_2]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.R_0 + self.amplitude / self.slope return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit['x'] != inputs_unit['y']: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'R_0': inputs_unit[self.inputs[0]], 'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class RickerWavelet1D(Fittable1DModel): """ One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float Position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D Notes ----- Model formula: .. math:: f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import RickerWavelet1D plt.figure() s1 = RickerWavelet1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -2, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="Position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, amplitude, x_0, sigma): """One dimensional Ricker Wavelet model function""" xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2) return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww) def bounding_box(self, factor=10.0): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of sigma used to define the limits. 
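Examples
--------
An illustrative check of the returned limits (the parameter values
are examples, not defaults):

    >>> from astropy.modeling.models import RickerWavelet1D
    >>> model = RickerWavelet1D(amplitude=1., x_0=0., sigma=2.)
    >>> model.bounding_box(factor=5)  # doctest: +SKIP
    (-10.0, 10.0)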
""" x0 = self.x_0 dx = factor * self.sigma return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'sigma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class RickerWavelet2D(Fittable2DModel): """ Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the peak y_0 : float y position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet1D, Gaussian2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{\\frac{- \\left(x - x_{0}\\right)^{2} - \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}} """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, sigma): """Two dimensional Ricker Wavelet model function""" rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2) return amplitude * (1 - rr_ww) * np.exp(- rr_ww) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'sigma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class AiryDisk2D(Fittable2DModel): """ Two dimensional Airy disk model. Parameters ---------- amplitude : float Amplitude of the Airy function. x_0 : float x position of the maximum of the Airy function. y_0 : float y position of the maximum of the Airy function. radius : float The radius of the Airy disk (radius of the first zero). See Also -------- Box2D, TrapezoidDisk2D, Gaussian2D Notes ----- Model formula: .. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2 Where :math:`J_1` is the first order Bessel function of the first kind, :math:`r` is radial distance from the maximum of the Airy function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R` is the input ``radius`` parameter, and :math:`R_z = 1.2196698912665045`). For an optical system, the radius of the first zero represents the limiting angular resolution and is approximately 1.22 * lambda / D, where lambda is the wavelength of the light and D is the diameter of the aperture. See [1]_ for more details about the Airy disk. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Airy_disk """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the Airy function") x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") radius = Parameter(default=1, description="The radius of the Airy disk (radius of first zero crossing)") _rz = None _j1 = None @classmethod def evaluate(cls, x, y, amplitude, x_0, y_0, radius): """Two dimensional Airy model function""" if cls._rz is None: from scipy.special import j1, jn_zeros cls._rz = jn_zeros(1, 1)[0] / np.pi cls._j1 = j1 r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz) if isinstance(r, Quantity): # scipy function cannot handle Quantity, so turn into array. r = r.to_value(u.dimensionless_unscaled) # Since r can be zero, we have to take care to treat that case # separately so as not to raise a numpy warning z = np.ones(r.shape) rt = np.pi * r[r > 0] z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2 if isinstance(amplitude, Quantity): # make z quantity too, otherwise in-place multiplication fails. z = Quantity(z, u.dimensionless_unscaled, copy=False) z *= amplitude return z @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'radius': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Moffat1D(Fittable1DModel): """ One dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian1D, Box1D Notes ----- Model formula: .. math:: f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Moffat1D plt.figure() s1 = Moffat1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the model") x_0 = Parameter(default=0, description="X position of maximum of Moffat model") gamma = Parameter(default=1, description="Core width of Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. 
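The implemented relation is
``FWHM = 2 * abs(gamma) * sqrt(2**(1 / alpha) - 1)``.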
""" return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model function""" return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model derivative with respect to parameters""" fac = (1 + (x - x_0) ** 2 / gamma ** 2) d_A = fac ** (-alpha) d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2)) d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma ** 3)) d_alpha = -amplitude * d_A * np.log(fac) return [d_A, d_x_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'gamma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Moffat2D(Fittable2DModel): """ Two dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. y_0 : float y position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the model") x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model") y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model") gamma = Parameter(default=1, description="Core width of the Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. """ return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model function""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 return amplitude * (1 + rr_gg) ** (-alpha) @staticmethod def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model derivative with respect to parameters""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 d_A = (1 + rr_gg) ** (-alpha) d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) / (gamma ** 2 * (1 + rr_gg))) d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) / (gamma ** 2 * (1 + rr_gg))) d_alpha = -amplitude * d_A * np.log(1 + rr_gg) d_gamma = (2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))) return [d_A, d_x_0, d_y_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None else: return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'gamma': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Sersic2D(Fittable2DModel): r""" Two dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. x_0 : float, optional x position of the center. y_0 : float, optional y position of the center. ellip : float, optional Ellipticity. theta : float, optional Rotation angle in radians, counterclockwise from the positive x-axis. See Also -------- Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (2n,b_n) Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic2D import matplotlib.pyplot as plt x,y = np.meshgrid(np.arange(100), np.arange(100)) mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50, ellip=.5, theta=-1) img = mod(x, y) log_img = np.log10(img) plt.figure() plt.imshow(log_img, origin='lower', interpolation='nearest', vmin=-1, vmax=2) plt.xlabel('x') plt.ylabel('y') cbar = plt.colorbar() cbar.set_label('Log Brightness', rotation=270, labelpad=25) cbar.set_ticks([-1, 0, 1, 2], update_ticks=True) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") x_0 = Parameter(default=0, description="X position of the center") y_0 = Parameter(default=0, description="Y position of the center") ellip = Parameter(default=0, description="Ellipticity") theta = Parameter(default=0, description="Rotation angle in radians (counterclockwise-positive)") _gammaincinv = None @classmethod def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta): """Two dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv bn = cls._gammaincinv(2. * n, 0.5) a, b = r_eff, (1 - ellip) * r_eff cos_theta, sin_theta = np.cos(theta), np.sin(theta) x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2) return amplitude * np.exp(-bn * (z ** (1 / n) - 1)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return {'x_0': inputs_unit[self.inputs[0]], 'y_0': inputs_unit[self.inputs[0]], 'r_eff': inputs_unit[self.inputs[0]], 'theta': u.rad, 'amplitude': outputs_unit[self.outputs[0]]} class KingProjectedAnalytic1D(Fittable1DModel): """ Projected (surface density) analytic King Model. Parameters ---------- amplitude : float Amplitude or scaling factor. r_core : float Core radius (f(r_c) ~ 0.5 f_0) r_tide : float Tidal radius. Notes ----- This model approximates a King model with an analytic function. The derivation of this equation can be found in King '62 (equation 14). This is just an approximation of the full model and the parameters derived from this model should be taken with caution. It usually works for models with a concentration (c = log10(r_t/r_c) parameter < 2. Model formula: .. math:: f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} - \\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2 Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import KingProjectedAnalytic1D import matplotlib.pyplot as plt plt.figure() rt_list = [1, 2, 5, 10, 20] for rt in rt_list: r = np.linspace(0.1, rt, 100) mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt) sig = mod(r) plt.loglog(r, sig/sig[0], label='c ~ {:0.2f}'.format(mod.concentration)) plt.xlabel("r") plt.ylabel(r"$\\sigma/\\sigma_0$") plt.legend() plt.show() References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K """ amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor") r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius") r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius") @property def concentration(self): """Concentration parameter of the king model""" return np.log10(np.abs(self.r_tide/self.r_core)) @staticmethod def evaluate(x, amplitude, r_core, r_tide): """ Analytic King model function. """ result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) - 1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2 # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) result[bounds] = result[bounds] * 0. return result @staticmethod def fit_deriv(x, amplitude, r_core, r_tide): """ Analytic King model function derivatives. """ d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) - 1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2 d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) - r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \ (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \ 2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2 d_r_tide = (2 * amplitude * r_core ** 2 * r_tide * (1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2) # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) d_amplitude[bounds] = d_amplitude[bounds]*0 d_r_core[bounds] = d_r_core[bounds]*0 d_r_tide[bounds] = d_r_tide[bounds]*0 return [d_amplitude, d_r_core, d_r_tide] @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. The model is not defined for r > r_tide. 
``(r_low, r_high)`` """ return (0 * self.r_tide, 1 * self.r_tide) @property def input_units(self): if self.r_core.unit is None: return None return {self.inputs[0]: self.r_core.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'r_core': inputs_unit[self.inputs[0]], 'r_tide': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Logarithmic1D(Fittable1DModel): """ One dimensional logarithmic model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Exponential1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.log(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): d_amplitude = np.log(x / tau) d_tau = np.zeros(x.shape) - (amplitude / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Exponential1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'tau': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class Exponential1D(Fittable1DModel): """ One dimensional exponential model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Logarithmic1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.exp(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): ''' Derivative with respect to parameters''' d_amplitude = np.exp(x / tau) d_tau = -amplitude * (x / tau**2) * np.exp(x / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Logarithmic1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): ''' tau cannot be 0''' if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'tau': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]}
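# A brief usage sketch (illustrative, not part of the module): the
# Logarithmic1D/Exponential1D pair above are exact inverses of each other,
# e.g.
#
#     >>> import numpy as np
#     >>> from astropy.modeling.models import Exponential1D
#     >>> exp = Exponential1D(amplitude=2., tau=3.)   # f(x) = 2 * exp(x / 3)
#     >>> x = np.linspace(0.1, 5., 10)
#     >>> np.allclose(exp.inverse(exp(x)), x)         # inverse is Logarithmic1D
#     True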
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- # pylint: disable=invalid-name """ Implements projections--particularly sky projections defined in WCS Paper II [1]_. All angles are set and displayed in degrees but internally computations are performed in radians. All functions expect inputs and outputs in degrees. References ---------- .. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II) """ import abc from itertools import chain, product import numpy as np from astropy import units as u from astropy import wcs from .core import Model from .parameters import InputParameterError, Parameter from .utils import _to_orig_unit, _to_radian # List of tuples of the form # (long class name without suffix, short WCSLIB projection code): _PROJ_NAME_CODE = [ ('ZenithalPerspective', 'AZP'), ('SlantZenithalPerspective', 'SZP'), ('Gnomonic', 'TAN'), ('Stereographic', 'STG'), ('SlantOrthographic', 'SIN'), ('ZenithalEquidistant', 'ARC'), ('ZenithalEqualArea', 'ZEA'), ('Airy', 'AIR'), ('CylindricalPerspective', 'CYP'), ('CylindricalEqualArea', 'CEA'), ('PlateCarree', 'CAR'), ('Mercator', 'MER'), ('SansonFlamsteed', 'SFL'), ('Parabolic', 'PAR'), ('Molleweide', 'MOL'), ('HammerAitoff', 'AIT'), ('ConicPerspective', 'COP'), ('ConicEqualArea', 'COE'), ('ConicEquidistant', 'COD'), ('ConicOrthomorphic', 'COO'), ('BonneEqualArea', 'BON'), ('Polyconic', 'PCO'), ('TangentialSphericalCube', 'TSC'), ('COBEQuadSphericalCube', 'CSC'), ('QuadSphericalCube', 'QSC'), ('HEALPix', 'HPX'), ('HEALPixPolar', 'XPH'), ] _NOT_SUPPORTED_PROJ_CODES = ['ZPN'] _PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE) projcodes = [code for _, code in _PROJ_NAME_CODE] __all__ = [ 'Projection', 'Pix2SkyProjection', 'Sky2PixProjection', 'Zenithal', 'Cylindrical', 'PseudoCylindrical', 'Conic', 'PseudoConic', 'QuadCube', 'HEALPix', 'AffineTransformation2D', 'projcodes' ] + list(map('_'.join, product(['Pix2Sky', 'Sky2Pix'], chain(*_PROJ_NAME_CODE)))) class _ParameterDS(Parameter): """ Same as `Parameter` but can indicate its modified status via the ``dirty`` property. This flag also gets set automatically when a parameter is modified. This ability to track a parameter's modified status is needed for automatic updates of WCSLIB's prjprm structure (which may be a time-intensive operation) *only as required*. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dirty = True def validate(self, value): super().validate(value) self.dirty = True class Projection(Model): """Base class for all sky projections.""" # Radius of the generating sphere. # This sets the circumference to 360 deg so that arc length is measured in deg. r0 = 180 * u.deg / np.pi _separable = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj = wcs.Prjprm() @property @abc.abstractmethod def inverse(self): """ Inverse projection--all projection models must provide an inverse. """ @property def prjprm(self): """ WCSLIB ``prjprm`` structure. """ self._update_prj() return self._prj def _update_prj(self): """ A default updater for the projection's pv. .. warning:: This method assumes that PV0 is never modified. If a projection that uses PV0 is ever implemented in this module, that projection class should override this method. .. warning:: This method assumes that the order in which PVi values (i>0) are to be assigned is identical to the order of model parameters in ``param_names``. That is, pv[1] = model.parameters[0], ...
""" if not self.param_names: return pv = [] dirty = False for p in self.param_names: param = getattr(self, p) pv.append(float(param.value)) dirty |= param.dirty param.dirty = False if dirty: self._prj.pv = None, *pv self._prj.set() class Pix2SkyProjection(Projection): """Base class for all Pix2Sky projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split('_')[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super(Pix2SkyProjection, cls).__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # with no parameters: self._prj.set() self.inputs = ('x', 'y') self.outputs = ('phi', 'theta') @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, x, y, *args, **kwargs): self._update_prj() return self._prj.prjx2s(x, y) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Sky2PixProjection(Projection): """Base class for all Sky2Pix projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split('_')[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super(Sky2PixProjection, cls).__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # without parameters: self._prj.set() self.inputs = ('phi', 'theta') self.outputs = ('x', 'y') @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, phi, theta, *args, **kwargs): self._update_prj() return self._prj.prjs2x(phi, theta) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Zenithal(Projection): r"""Base class for all Zenithal projections. Zenithal (or azimuthal) projections map the sphere directly onto a plane. All zenithal projections are specified by defining the radius as a function of native latitude, :math:`R_\theta`. The pixel-to-sky transformation is defined as: .. math:: \phi &= \arg(-y, x) \\ R_\theta &= \sqrt{x^2 + y^2} and the inverse (sky-to-pixel) is defined as: .. math:: x &= R_\theta \sin \phi \\ y &= R_\theta \cos \phi """ class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Zenithal perspective projection - pixel to sky. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: \phi &= \arg(-y \cos \gamma, x) \\ \theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right. where: .. math:: \psi &= \arg(\rho, 1) \\ \omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\ \rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\ R &= \sqrt{x^2 + y^2 \cos^2 \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. 
""" mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default = 0°)") @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: x &= R \sin \phi \\ y &= -R \sec \gamma \cos \theta where: .. math:: R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default=0°)") @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Slant zenithal perspective projection - pixel to sky. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees (Default=0°)" ) theta0 = _ParameterDS( default=90.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees (Default=0°)" ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees" ) theta0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees" ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1") class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal): r""" Gnomonic projection - pixel to sky. Corresponds to the ``TAN`` projection in FITS WCS. 

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
    """


class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
    r"""
    Gnomonic projection - sky to pixel.

    Corresponds to the ``TAN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
    """


class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
    r"""
    Stereographic projection - pixel to sky.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
    """


class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
    r"""
    Stereographic projection - sky to pixel.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
    """


class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
    r"""
    Slant orthographic projection - pixel to sky.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        \theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)

    The parameters :math:`\xi` and :math:`\eta` are defined from the
    reference point :math:`(\phi_c, \theta_c)` as:

    .. math::
        \xi &= \cot \theta_c \sin \phi_c \\
        \eta &= - \cot \theta_c \cos \phi_c

    Parameters
    ----------
    xi : float
        Obliqueness parameter, ξ. Default is 0.0.

    eta : float
        Obliqueness parameter, η. Default is 0.0.
    """

    xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
    eta = _ParameterDS(default=0.0, description="Obliqueness parameter")


class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
    r"""
    Slant orthographic projection - sky to pixel.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cos \theta

    More generally, including the obliqueness parameters, the
    transformation is:

    .. math::
        x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
        y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
    """

    xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
    eta = _ParameterDS(default=0.0, description="Obliqueness parameter")


class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equidistant projection - pixel to sky.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - R_\theta
    """


class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equidistant projection - sky to pixel.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = 90^\circ - \theta
    """


class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equal area projection - pixel to sky.

    Corresponds to the ``ZEA`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
    """


class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equal area projection - sky to pixel.

    Corresponds to the ``ZEA`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
                 &= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
    """


class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
    r"""
    Airy projection - pixel to sky.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(
        default=90.0,
        description="The latitude at which to minimize the error, in degrees")


class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
    r"""
    Airy projection - sky to pixel.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
            \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)

    where:

    .. math::
        \xi &= \frac{90^\circ - \theta}{2} \\
        \xi_b &= \frac{90^\circ - \theta_b}{2}

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(
        default=90.0,
        description="The latitude at which to minimize the error, in degrees")


class Cylindrical(Projection):
    r"""Base class for Cylindrical projections.

    Cylindrical projections are so-named because the surface of
    projection is a cylinder.
    """

    _separable = True


class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical perspective - pixel to sky.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        \phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)

    where:

    .. math::
        \eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.

    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(default=1.0)
    lam = _ParameterDS(default=1.0)

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError(
                "CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError(
                "CYP projection is not defined for lambda = -mu")


class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
    r"""
    Cylindrical perspective - sky to pixel.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        x &= \lambda \phi \\
        y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.

    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(default=1.0,
                      description="Distance from center of sphere in spherical radii")
    lam = _ParameterDS(default=1.0,
                       description="Radius of the cylinder in spherical radii")

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError(
                "CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError(
                "CYP projection is not defined for lambda = -mu")


class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical equal area projection - pixel to sky.

    Corresponds to the ``CEA`` projection in FITS WCS.

    .. math::
        \phi &= x \\
        \theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)

    Parameters
    ----------
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    lam = _ParameterDS(default=1)


class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
    r"""
    Cylindrical equal area projection - sky to pixel.

    Corresponds to the ``CEA`` projection in FITS WCS.

    .. math::
        x &= \phi \\
        y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}

    Parameters
    ----------
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    lam = _ParameterDS(default=1)


class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
    r"""
    Plate carrée projection - pixel to sky.

    Corresponds to the ``CAR`` projection in FITS WCS.

    .. math::
        \phi &= x \\
        \theta &= y
    """

    @staticmethod
    def evaluate(x, y):
        # The intermediate variables are only used here for clarity
        phi = np.array(x)
        theta = np.array(y)
        return phi, theta


class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
    r"""
    Plate carrée projection - sky to pixel.

    Corresponds to the ``CAR`` projection in FITS WCS.

    .. math::
        x &= \phi \\
        y &= \theta
    """

    @staticmethod
    def evaluate(phi, theta):
        # The intermediate variables are only used here for clarity
        x = np.array(phi)
        y = np.array(theta)
        return x, y


class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
    r"""
    Mercator projection - pixel to sky.

    Corresponds to the ``MER`` projection in FITS WCS.

    .. math::
        \phi &= x \\
        \theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
    """


class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
    r"""
    Mercator projection - sky to pixel.

    Corresponds to the ``MER`` projection in FITS WCS.

    .. math::
        x &= \phi \\
        y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
    """


class PseudoCylindrical(Projection):
    r"""Base class for pseudocylindrical projections.

    Pseudocylindrical projections are like cylindrical projections
    except the parallels of latitude are projected at diminishing
    lengths toward the polar regions in order to reduce lateral
    distortion there.  Consequently, the meridians are curved.
    """

    _separable = True


class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
    r"""
    Sanson-Flamsteed projection - pixel to sky.

    Corresponds to the ``SFL`` projection in FITS WCS.

    .. math::
        \phi &= \frac{x}{\cos y} \\
        \theta &= y
    """


class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
    r"""
    Sanson-Flamsteed projection - sky to pixel.

    Corresponds to the ``SFL`` projection in FITS WCS.

    .. math::
        x &= \phi \cos \theta \\
        y &= \theta
    """


class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
    r"""
    Parabolic projection - pixel to sky.

    Corresponds to the ``PAR`` projection in FITS WCS.

    .. math::
        \phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
        \theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
    """


class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
    r"""
    Parabolic projection - sky to pixel.

    Corresponds to the ``PAR`` projection in FITS WCS.

    .. math::
        x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
        y &= 180^\circ \sin \frac{\theta}{3}
    """


class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
    r"""
    Molleweide's projection - pixel to sky.

    Corresponds to the ``MOL`` projection in FITS WCS.

    .. math::
        \phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
        \theta &= \sin^{-1}\left(\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
            + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}\right)
    """


class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
    r"""
    Molleweide's projection - sky to pixel.

    Corresponds to the ``MOL`` projection in FITS WCS.

    .. math::
        x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
        y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma

    where :math:`\gamma` is defined as the solution of the
    transcendental equation:

    .. math::
        \sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
    """


class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
    r"""
    Hammer-Aitoff projection - pixel to sky.

    Corresponds to the ``AIT`` projection in FITS WCS.

    .. math::
        \phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
        \theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
    """


class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
    r"""
    Hammer-Aitoff projection - sky to pixel.

    Corresponds to the ``AIT`` projection in FITS WCS.

    .. math::
        x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
        y &= \gamma \sin \theta

    where:

    .. math::
        \gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
    """


class Conic(Projection):
    r"""Base class for conic projections.

    In conic projections, the sphere is thought to be projected onto
    the surface of a cone which is then opened out.

    In a general sense, the pixel-to-sky transformation is defined as:

    .. math::
        \phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
        R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}

    and the inverse (sky-to-pixel) is defined as:

    .. math::
        x &= R_\theta \sin (C \phi) \\
        y &= R_\theta \cos (C \phi) + Y_0

    where :math:`C` is the "constant of the cone":

    .. math::
        C = \frac{180^\circ \cos \theta}{\pi R_\theta}
    """

    sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
    delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)


class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
    r"""
    Colles' conic perspective projection - pixel to sky.

    Corresponds to the ``COP`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \sin \theta_a \\
        R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
        Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
    """


class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
    r"""
    Colles' conic perspective projection - sky to pixel.

    Corresponds to the ``COP`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \sin \theta_a \\
        R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
        Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
    """


class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
    r"""
    Alber's conic equal area projection - pixel to sky.

    Corresponds to the ``COE`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \gamma / 2 \\
        R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
            \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
        Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
            \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}

    where:

    .. math::
        \gamma = \sin \theta_1 + \sin \theta_2

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
    """


class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
    r"""
    Alber's conic equal area projection - sky to pixel.

    Corresponds to the ``COE`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \gamma / 2 \\
        R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
            \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
        Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
            \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}

    where:

    .. math::
        \gamma = \sin \theta_1 + \sin \theta_2

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
    """


class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
    r"""
    Conic equidistant projection - pixel to sky.

    Corresponds to the ``COD`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
        R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
    """


class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
    r"""
    Conic equidistant projection - sky to pixel.

    Corresponds to the ``COD`` projection in FITS WCS.

    See `Conic` for a description of the entire equation.

    The projection formulae are:

    .. math::
        C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
        R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a

    Parameters
    ----------
    sigma : float
        :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 90.

    delta : float
        :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
        :math:`\theta_2` are the latitudes of the standard parallels,
        in degrees. Default is 0.
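
    Examples
    --------
    A minimal illustrative round trip with standard parallels at 30°
    and 50°, i.e. ``sigma=40`` and ``delta=10``:

    >>> from astropy.modeling.projections import Sky2Pix_ConicEquidistant
    >>> model = Sky2Pix_ConicEquidistant(sigma=40.0, delta=10.0)
    >>> x, y = model(15.0, 35.0)            # native sphere -> pixel plane
    >>> phi, theta = model.inverse(x, y)    # round trip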
""" class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic): r""" Conic orthomorphic projection - pixel to sky. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic): r""" Conic orthomorphic projection - sky to pixel. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class PseudoConic(Projection): r"""Base class for pseudoconic projections. Pseudoconics are a subclass of conics with concentric parallels. """ class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - pixel to sky. Corresponds to the ``BON`` projection in FITS WCS. .. math:: \phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\ \theta &= Y_0 - R_\theta where: .. math:: R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\ A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. """ _separable = True theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian) class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - sky to pixel. Corresponds to the ``BON`` projection in FITS WCS. .. math:: x &= R_\theta \sin A_\phi \\ y &= -R_\theta \cos A_\phi + Y_0 where: .. math:: A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\ R_\theta &= Y_0 - \theta \\ Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1 Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. 
""" _separable = True theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Bonne conformal latitude, in degrees") class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic): r""" Polyconic projection - pixel to sky. Corresponds to the ``PCO`` projection in FITS WCS. """ class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic): r""" Polyconic projection - sky to pixel. Corresponds to the ``PCO`` projection in FITS WCS. """ class QuadCube(Projection): r"""Base class for quad cube projections. Quadrilateralized spherical cube (quad-cube) projections belong to the class of polyhedral projections in which the sphere is projected onto the surface of an enclosing polyhedron. The six faces of the quad-cube projections are numbered and laid out as:: 0 4 3 2 1 4 3 2 5 """ class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube): r""" Tangential spherical cube projection - pixel to sky. Corresponds to the ``TSC`` projection in FITS WCS. """ class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube): r""" Tangential spherical cube projection - sky to pixel. Corresponds to the ``TSC`` projection in FITS WCS. """ class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``CSC`` projection in FITS WCS. """ class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``CSC`` projection in FITS WCS. """ class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube): r""" Quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``QSC`` projection in FITS WCS. """ class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube): r""" Quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``QSC`` projection in FITS WCS. """ class HEALPix(Projection): r"""Base class for HEALPix projections. """ class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix): r""" HEALPix - pixel to sky. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.") X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.") class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix): r""" HEALPix projection - sky to pixel. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.") X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.") class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class AffineTransformation2D(Model): """ Perform an affine transformation in 2 dimensions. 

    Parameters
    ----------
    matrix : array
        A 2x2 matrix specifying the linear transformation to apply to
        the inputs

    translation : array
        A 2D vector (given as either a 2x1 or 1x2 array) specifying a
        translation to apply to the inputs
    """

    n_inputs = 2
    n_outputs = 2

    standard_broadcasting = False

    _separable = False

    matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
    translation = Parameter(default=[0.0, 0.0])

    @matrix.validator
    def matrix(self, value):
        """Validates that the input matrix is a 2x2 2D array."""
        if np.shape(value) != (2, 2):
            raise InputParameterError(
                "Expected transformation matrix to be a 2x2 array")

    @translation.validator
    def translation(self, value):
        """
        Validates that the translation vector is a 2D vector.  This allows
        either a "row" vector or a "column" vector where in the latter case
        the resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
        """
        if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or
                (np.ndim(value) == 2 and np.shape(value) == (1, 2))):
            raise InputParameterError(
                "Expected translation vector to be a 2 element row or column "
                "vector array")

    def __init__(self, matrix=matrix, translation=translation, **kwargs):
        super().__init__(matrix=matrix, translation=translation, **kwargs)
        self.inputs = ("x", "y")
        self.outputs = ("x", "y")

    @property
    def inverse(self):
        """
        Inverse transformation.

        Raises `~astropy.modeling.InputParameterError` if the transformation
        cannot be inverted.
        """
        det = np.linalg.det(self.matrix.value)

        if det == 0:
            raise InputParameterError(
                "Transformation matrix is singular; {} model does not "
                "have an inverse".format(self.__class__.__name__))

        matrix = np.linalg.inv(self.matrix.value)
        if self.matrix.unit is not None:
            matrix = matrix * self.matrix.unit
        # If matrix has unit then translation has unit, so no need to assign it.
        translation = -np.dot(matrix, self.translation.value)
        return self.__class__(matrix=matrix, translation=translation)

    @classmethod
    def evaluate(cls, x, y, matrix, translation):
        """
        Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y coordinates--or
        a single coordinate pair.

        Parameters
        ----------
        x, y : array, float
            x and y coordinates
        """
        if x.shape != y.shape:
            raise ValueError("Expected input arrays to have the same shape")

        shape = x.shape or (1,)
        # Use asarray to ensure we strip any units from the inputs.
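        # Stack the flattened x and y with a row of ones so that the linear
        # part and the translation can be applied together as a single
        # 3x3 augmented-matrix product in homogeneous coordinates.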
        inarr = np.vstack([np.asarray(x).ravel(),
                           np.asarray(y).ravel(),
                           np.ones(x.size, x.dtype)])

        if inarr.shape[0] != 3 or inarr.ndim != 2:
            raise ValueError("Incompatible input shapes")

        augmented_matrix = cls._create_augmented_matrix(matrix, translation)
        result = np.dot(augmented_matrix, inarr)
        x, y = result[0], result[1]
        x.shape = y.shape = shape

        return x, y

    @staticmethod
    def _create_augmented_matrix(matrix, translation):
        unit = None
        if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
            if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
                raise ValueError("To use AffineTransformation with quantities, "
                                 "both matrix and translation need to be quantities.")
            unit = translation.unit
            # matrix should have the same units as translation
            if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
                raise ValueError("matrix and translation must have the same units.")

        augmented_matrix = np.empty((3, 3), dtype=float)
        augmented_matrix[0:2, 0:2] = matrix
        augmented_matrix[0:2, 2:].flat = translation
        augmented_matrix[2] = [0, 0, 1]
        if unit is not None:
            return augmented_matrix * unit
        return augmented_matrix

    @property
    def input_units(self):
        if self.translation.unit is None and self.matrix.unit is None:
            return None
        elif self.translation.unit is not None:
            return dict(zip(self.inputs, [self.translation.unit] * 2))
        else:
            return dict(zip(self.inputs, [self.matrix.unit] * 2))


for long_name, short_name in _PROJ_NAME_CODE:
    # define short-name projection equivalent classes:
    globals()['Pix2Sky_' + short_name] = globals()['Pix2Sky_' + long_name]
    globals()['Sky2Pix_' + short_name] = globals()['Sky2Pix_' + long_name]
    # set inverse classes:
    globals()['Pix2Sky_' + long_name]._inv_cls = globals()['Sky2Pix_' + long_name]
    globals()['Sky2Pix_' + long_name]._inv_cls = globals()['Pix2Sky_' + long_name]
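
# Note: the loop above makes the short three-letter FITS codes
# interchangeable with the long class names, e.g.:
#
#     >>> from astropy.modeling import projections
#     >>> projections.Pix2Sky_TAN is projections.Pix2Sky_Gnomonic
#     True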
f193ef523f6f686b431bba80736283f617ca13ecaa730f866f1a1c73841f898d
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
# pylint: disable=invalid-name
import numpy as np

from astropy.utils import check_broadcast, indent

from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import _validate_domain_window, comb, poly_map_domain

__all__ = [
    'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
    'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
    'Polynomial2D', 'SIP', 'OrthoPolynomialBase', 'PolynomialModel'
]


class PolynomialBase(FittableModel):
    """
    Base class for all polynomial-like models with an arbitrary number of
    parameters in the form of coefficients.

    In this case Parameter instances are returned through the class's
    ``__getattr__`` rather than through class descriptors.
    """

    # Default _param_names list; this will be filled in by the implementation's
    # __init__
    _param_names = ()

    linear = True
    col_fit_deriv = False

    @property
    def param_names(self):
        """Coefficient names generated based on the model's polynomial degree
        and number of dimensions.

        Subclasses should implement this to return parameter names in the
        desired format.

        On most `Model` classes this is a class attribute, but for polynomial
        models it is an instance attribute since each polynomial model
        instance can have different parameters depending on the degree of the
        polynomial and the number of dimensions, for example.
        """
        return self._param_names


class PolynomialModel(PolynomialBase):
    """
    Base class for polynomial models.

    Its main purpose is to determine how many coefficients are needed
    based on the polynomial order and dimension and to provide their
    default values, names and ordering.
    """

    def __init__(self, degree, n_models=None, model_set_axis=None,
                 name=None, meta=None, **params):
        self._degree = degree
        self._order = self.get_num_coeff(self.n_inputs)
        self._param_names = self._generate_coeff_names(self.n_inputs)
        if n_models:
            if model_set_axis is None:
                model_set_axis = 0
            minshape = (1,) * model_set_axis + (n_models,)
        else:
            minshape = ()
        for param_name in self._param_names:
            self._parameters_[param_name] = \
                Parameter(param_name, default=np.zeros(minshape))

        super().__init__(
            n_models=n_models, model_set_axis=model_set_axis, name=name,
            meta=meta, **params)

    @property
    def degree(self):
        """Degree of polynomial."""
        return self._degree

    def get_num_coeff(self, ndim):
        """
        Return the number of coefficients in one parameter set
        """
        if self.degree < 0:
            raise ValueError("Degree of polynomial must be non-negative")
        # deg + 1 is used to account for the difference between IRAF
        # using the degree and numpy using the exact degree
        if ndim != 1:
            nmixed = comb(self.degree, ndim)
        else:
            nmixed = 0
        numc = self.degree * ndim + nmixed + 1
        return numc

    def _invlex(self):
        c = []
        lencoeff = self.degree + 1
        for i in range(lencoeff):
            for j in range(lencoeff):
                if i + j <= self.degree:
                    c.append((j, i))
        return c[::-1]

    def _generate_coeff_names(self, ndim):
        names = []
        if ndim == 1:
            for n in range(self._order):
                names.append(f'c{n}')
        else:
            for i in range(self.degree + 1):
                names.append(f'c{i}_{0}')
            for i in range(1, self.degree + 1):
                names.append(f'c{0}_{i}')
            for i in range(1, self.degree):
                for j in range(1, self.degree):
                    if i + j < self.degree + 1:
                        names.append(f'c{i}_{j}')
        return tuple(names)


class _PolyDomainWindow1D(PolynomialModel):
    """
    This class sets ``domain`` and ``window`` of 1D polynomials.
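
    For example (illustrative; the default coefficients are all zero),
    evaluation first maps ``domain`` onto ``window`` via
    `~astropy.modeling.utils.poly_map_domain`:

    >>> from astropy.modeling.polynomial import Polynomial1D
    >>> p = Polynomial1D(degree=1, domain=(0, 10), window=(-1, 1))
    >>> y = p(5.0)  # 5.0 is first mapped from ``domain`` into ``window``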
""" def __init__(self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( degree, n_models, model_set_axis, name=name, meta=meta, **params) self._set_default_domain_window(domain, window) @property def window(self): return self._window @window.setter def window(self, val): self._window = _validate_domain_window(val) @property def domain(self): return self._domain @domain.setter def domain(self, val): self._domain = _validate_domain_window(val) def _set_default_domain_window(self, domain, window): """ This method sets the ``domain`` and ``window`` attributes on 1D subclasses. """ self._default_domain_window = {'domain': None, 'window': (-1, 1) } self.window = window or (-1, 1) self.domain = domain def __repr__(self): return self._format_repr([self.degree], kwargs={'domain': self.domain, 'window': self.window}, defaults=self._default_domain_window ) def __str__(self): return self._format_str([('Degree', self.degree), ('Domain', self.domain), ('Window', self.window)], self._default_domain_window) class OrthoPolynomialBase(PolynomialBase): """ This is a base class for the 2D Chebyshev and Legendre models. The polynomials implemented here require a maximum degree in x and y. For explanation of ``x_domain``, ``y_domain``, ```x_window`` and ```y_window`` see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`. Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable x_window : tuple or None, optional range of the x independent variable y_domain : tuple or None, optional domain of the y independent variable y_window : tuple or None, optional range of the y independent variable **params : dict {keyword: value} pairs, representing {parameter_name: value} """ n_inputs = 2 n_outputs = 1 def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): self.x_degree = x_degree self.y_degree = y_degree self._order = self.get_num_coeff() # Set the ``x/y_domain`` and ``x/y_wndow`` attributes in subclasses. 
        self._default_domain_window = {
            'x_window': (-1, 1),
            'y_window': (-1, 1),
            'x_domain': None,
            'y_domain': None
        }
        self.x_window = x_window or self._default_domain_window['x_window']
        self.y_window = y_window or self._default_domain_window['y_window']
        self.x_domain = x_domain
        self.y_domain = y_domain

        self._param_names = self._generate_coeff_names()
        if n_models:
            if model_set_axis is None:
                model_set_axis = 0
            minshape = (1,) * model_set_axis + (n_models,)
        else:
            minshape = ()
        for param_name in self._param_names:
            self._parameters_[param_name] = \
                Parameter(param_name, default=np.zeros(minshape))

        super().__init__(
            n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    @property
    def x_domain(self):
        return self._x_domain

    @x_domain.setter
    def x_domain(self, val):
        self._x_domain = _validate_domain_window(val)

    @property
    def y_domain(self):
        return self._y_domain

    @y_domain.setter
    def y_domain(self, val):
        self._y_domain = _validate_domain_window(val)

    @property
    def x_window(self):
        return self._x_window

    @x_window.setter
    def x_window(self, val):
        self._x_window = _validate_domain_window(val)

    @property
    def y_window(self):
        return self._y_window

    @y_window.setter
    def y_window(self, val):
        self._y_window = _validate_domain_window(val)

    def __repr__(self):
        return self._format_repr([self.x_degree, self.y_degree],
                                 kwargs={'x_domain': self.x_domain,
                                         'y_domain': self.y_domain,
                                         'x_window': self.x_window,
                                         'y_window': self.y_window},
                                 defaults=self._default_domain_window)

    def __str__(self):
        return self._format_str(
            [('X_Degree', self.x_degree),
             ('Y_Degree', self.y_degree),
             ('X_Domain', self.x_domain),
             ('Y_Domain', self.y_domain),
             ('X_Window', self.x_window),
             ('Y_Window', self.y_window)],
            self._default_domain_window)

    def get_num_coeff(self):
        """
        Determine how many coefficients are needed

        Returns
        -------
        numc : int
            number of coefficients
        """
        if self.x_degree < 0 or self.y_degree < 0:
            raise ValueError("Degree of polynomial must be non-negative")

        return (self.x_degree + 1) * (self.y_degree + 1)

    def _invlex(self):
        # TODO: This is a very slow way to do this; fix it and related methods
        # like _alpha
        c = []
        xvar = np.arange(self.x_degree + 1)
        yvar = np.arange(self.y_degree + 1)
        for j in yvar:
            for i in xvar:
                c.append((i, j))
        return np.array(c[::-1])

    def invlex_coeff(self, coeffs):
        invlex_coeffs = []
        xvar = np.arange(self.x_degree + 1)
        yvar = np.arange(self.y_degree + 1)
        for j in yvar:
            for i in xvar:
                name = f'c{i}_{j}'
                coeff = coeffs[self.param_names.index(name)]
                invlex_coeffs.append(coeff)
        return np.array(invlex_coeffs[::-1])

    def _alpha(self):
        invlexdeg = self._invlex()
        invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
        nx = self.x_degree + 1
        ny = self.y_degree + 1
        alpha = np.zeros((ny * nx + 3, ny + nx))

        for n in range(len(invlexdeg)):
            alpha[n][invlexdeg[n]] = [1, 1]
            alpha[-2, 0] = 1
            alpha[-3, nx] = 1
        return alpha

    def imhorner(self, x, y, coeff):
        _coeff = list(coeff)
        _coeff.extend([0, 0, 0])
        alpha = self._alpha()
        r0 = _coeff[0]
        nalpha = len(alpha)

        karr = np.diff(alpha, axis=0)
        kfunc = self._fcache(x, y)
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        nterms = x_terms + y_terms
        for n in range(1, nterms + 1 + 3):
            setattr(self, 'r' + str(n), 0.)

        for n in range(1, nalpha):
            k = karr[n - 1].nonzero()[0].max() + 1
            rsum = 0
            for i in range(1, k + 1):
                rsum = rsum + getattr(self, 'r' + str(i))
            val = kfunc[k - 1] * (r0 + rsum)
            setattr(self, 'r' + str(k), val)
            r0 = _coeff[n]
            for i in range(1, k):
                setattr(self, 'r' + str(i), 0.)
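        # Sum the final value: r0 plus all of the partial ``r_i`` registers
        # accumulated above.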
        result = r0
        for i in range(1, nterms + 1 + 3):
            result = result + getattr(self, 'r' + str(i))
        return result

    def _generate_coeff_names(self):
        names = []
        for j in range(self.y_degree + 1):
            for i in range(self.x_degree + 1):
                names.append(f'c{i}_{j}')
        return tuple(names)

    def _fcache(self, x, y):
        """
        Compute and store the individual functions.

        To be implemented by subclasses.
        """
        raise NotImplementedError("Subclasses should implement this")

    def evaluate(self, x, y, *coeffs):
        if self.x_domain is not None:
            x = poly_map_domain(x, self.x_domain, self.x_window)
        if self.y_domain is not None:
            y = poly_map_domain(y, self.y_domain, self.y_window)
        invcoeff = self.invlex_coeff(coeffs)
        return self.imhorner(x, y, invcoeff)

    def prepare_inputs(self, x, y, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)

        x, y = inputs
        if x.shape != y.shape:
            raise ValueError("Expected input arrays to have the same shape")

        return (x, y), broadcasted_shapes


class Chebyshev1D(_PolyDomainWindow1D):
    r"""
    Univariate Chebyshev series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)

    where ``T_i(x)`` is the corresponding Chebyshev polynomial of the
    1st kind.

    For explanation of ``domain`` and ``window`` see
    :ref:`Notes regarding usage of domain and window <domain-window-note>`.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : tuple or None, optional
    window : tuple or None, optional
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    **params : dict
        keyword : value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Chebyshev polynomials is a polynomial in x - since the
    coefficients within each Chebyshev polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
    units, 2x^2 and -1 would have incompatible units.
    """

    n_inputs = 1
    n_outputs = 1

    _separable = True

    def __init__(self, degree, domain=None, window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(degree, domain=domain, window=window,
                         n_models=n_models, model_set_axis=model_set_axis,
                         name=name, meta=meta, **params)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            x2 = 2 * x
            v[1] = x
            for i in range(2, self.degree + 1):
                v[i] = v[i - 1] * x2 - v[i - 2]
        return np.rollaxis(v, 0, v.ndim)

    def prepare_inputs(self, x, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)

        x = inputs[0]
        return (x,), broadcasted_shapes

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    @staticmethod
    def clenshaw(x, coeffs):
        """Evaluates the polynomial using Clenshaw's algorithm."""
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            x2 = 2 * x
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                tmp = c0
                c0 = coeffs[-i] - c1
                c1 = tmp + c1 * x2
        return c0 + c1 * x


class Hermite1D(_PolyDomainWindow1D):
    r"""
    Univariate Hermite series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)

    where ``H_i(x)`` is the corresponding Hermite polynomial
    ("Physicist's kind").

    For explanation of ``domain`` and ``window`` see
    :ref:`Notes regarding usage of domain and window <domain-window-note>`.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : tuple or None, optional
    window : tuple or None, optional
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    **params : dict
        keyword : value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Hermite polynomials is a polynomial in x - since the
    coefficients within each Hermite polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Hermite polynomial (H2) is 4x^2-2, but if x was specified with
    units, 4x^2 and -2 would have incompatible units.
    """

    n_inputs = 1
    n_outputs = 1

    _separable = True

    def __init__(self, degree, domain=None, window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(
            degree, domain, window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            x2 = 2 * x
            v[1] = x2
            for i in range(2, self.degree + 1):
                v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
        return np.rollaxis(v, 0, v.ndim)

    def prepare_inputs(self, x, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)

        x = inputs[0]
        return (x,), broadcasted_shapes

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    @staticmethod
    def clenshaw(x, coeffs):
        x2 = x * 2
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            nd = len(coeffs)
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                temp = c0
                nd = nd - 1
                c0 = coeffs[-i] - c1 * (2 * (nd - 1))
                c1 = temp + c1 * x2
        return c0 + c1 * x2


class Hermite2D(OrthoPolynomialBase):
    r"""
    Bivariate Hermite series.

    It is defined as

    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)

    where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.

    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and
    ``y_window`` see :ref:`Notes regarding usage of domain and window
    <domain-window-note>`.

    Parameters
    ----------
    x_degree : int
        degree in x
    y_degree : int
        degree in y
    x_domain : tuple or None, optional
        domain of the x independent variable
    y_domain : tuple or None, optional
        domain of the y independent variable
    x_window : tuple or None, optional
        range of the x independent variable
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    y_window : tuple or None, optional
        range of the y independent variable
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Hermite polynomials is a polynomial in x and/or y - since
    the coefficients within each Hermite polynomial are fixed, we can't use
    quantities for x and/or y since the units would not be compatible. For
    example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
    specified with units, 4x^2 and -2 would have incompatible units.
    """

    _separable = False

    def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
                 y_domain=None, y_window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(
            x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
            x_window=x_window, y_window=y_window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def _fcache(self, x, y):
        """
        Calculate the individual Hermite functions once and store them in a
        dictionary to be reused.
        """
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        kfunc = {}
        kfunc[0] = np.ones(x.shape)
        kfunc[1] = 2 * x.copy()
        kfunc[x_terms] = np.ones(y.shape)
        kfunc[x_terms + 1] = 2 * y.copy()
        for n in range(2, x_terms):
            kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
        for n in range(x_terms + 2, x_terms + y_terms):
            kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
        return kfunc

    def fit_deriv(self, x, y, *params):
        """
        Derivatives with respect to the coefficients.

        This is an array with Hermite polynomials:

        .. math::
            H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.shape != y.shape:
            raise ValueError("x and y must have the same shape")
        x = x.flatten()
        y = y.flatten()
        x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
        y_deriv = self._hermderiv1d(y, self.y_degree + 1).T

        ij = []
        for i in range(self.y_degree + 1):
            for j in range(self.x_degree + 1):
                ij.append(x_deriv[j] * y_deriv[i])

        v = np.array(ij)
        return v.T

    def _hermderiv1d(self, x, deg):
        """
        Derivative of 1D Hermite series
        """
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        d = np.empty((deg + 1, len(x)), dtype=x.dtype)
        d[0] = x * 0 + 1
        if deg > 0:
            x2 = 2 * x
            d[1] = x2
            for i in range(2, deg + 1):
                d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
        return np.rollaxis(d, 0, d.ndim)


class Legendre1D(_PolyDomainWindow1D):
    r"""
    Univariate Legendre series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)

    where ``L_i(x)`` is the corresponding Legendre polynomial.

    For explanation of ``domain`` and ``window`` see
    :ref:`Notes regarding usage of domain and window <domain-window-note>`.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : tuple or None, optional
    window : tuple or None, optional
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Legendre polynomials is a polynomial in x - since the
    coefficients within each Legendre polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
    units, 1.5x^2 and -0.5 would have incompatible units.
    """

    n_inputs = 1
    n_outputs = 1

    _separable = True

    def __init__(self, degree, domain=None, window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(
            degree, domain, window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def prepare_inputs(self, x, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)

        x = inputs[0]
        return (x,), broadcasted_shapes

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            v[1] = x
            for i in range(2, self.degree + 1):
                v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
        return np.rollaxis(v, 0, v.ndim)

    @staticmethod
    def clenshaw(x, coeffs):
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            nd = len(coeffs)
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                tmp = c0
                nd = nd - 1
                c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
                c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
        return c0 + c1 * x


class Polynomial1D(_PolyDomainWindow1D):
    r"""
    1D Polynomial model.

    It is defined as:

    .. math::

        P = \sum_{i=0}^{i=n}C_{i} * x^{i}

    For explanation of ``domain`` and ``window`` see
    :ref:`Notes regarding usage of domain and window <domain-window-note>`.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : tuple or None, optional
        If None, it is set to (-1, 1)
    window : tuple or None, optional
        If None, it is set to (-1, 1).  Fitters will remap the domain to
        this window.
    **params : dict
        keyword: value pairs, representing parameter_name: value
    """

    n_inputs = 1
    n_outputs = 1

    _separable = True

    def __init__(self, degree, domain=None, window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(
            degree, domain, window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

        # Set the domain and window separately because the defaults differ
        # from those of the orthogonal polynomials.
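        # With the defaults domain == window == (-1, 1), poly_map_domain is
        # an identity mapping, so a plain power series is evaluated unscaled
        # unless the user overrides domain/window.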
        self._default_domain_window = {'domain': (-1, 1),
                                       'window': (-1, 1)}
        self.domain = domain or self._default_domain_window['domain']
        self.window = window or self._default_domain_window['window']

    def prepare_inputs(self, x, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)

        x = inputs[0]
        return (x,), broadcasted_shapes

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.horner(x, coeffs)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        v = np.empty((self.degree + 1,) + x.shape, dtype=float)
        v[0] = 1
        if self.degree > 0:
            v[1] = x
            for i in range(2, self.degree + 1):
                v[i] = v[i - 1] * x
        return np.rollaxis(v, 0, v.ndim)

    @staticmethod
    def horner(x, coeffs):
        if len(coeffs) == 1:
            c0 = coeffs[-1] * np.ones_like(x, subok=False)
        else:
            c0 = coeffs[-1]
            for i in range(2, len(coeffs) + 1):
                c0 = coeffs[-i] + c0 * x
        return c0

    @property
    def input_units(self):
        if self.degree == 0 or self.c1.unit is None:
            return None
        else:
            return {self.inputs[0]: self.c0.unit / self.c1.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        mapping = {}
        for i in range(self.degree + 1):
            par = getattr(self, f'c{i}')
            mapping[par.name] = (outputs_unit[self.outputs[0]] /
                                 inputs_unit[self.inputs[0]] ** i)
        return mapping


class Polynomial2D(PolynomialModel):
    r"""
    2D Polynomial model.

    Represents a general polynomial of degree n:

    .. math::

        P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
        + c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ...
        + c_{(n-1)1}x^{n-1}y

    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and
    ``y_window`` see :ref:`Notes regarding usage of domain and window
    <domain-window-note>`.

    Parameters
    ----------
    degree : int
        Polynomial degree: largest sum of exponents (:math:`i + j`) of
        variables in each monomial term of the form :math:`x^i y^j`. The
        number of terms in a 2D polynomial of degree ``n`` is given by
        the binomial coefficient
        :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`.
    x_domain : tuple or None, optional
        domain of the x independent variable
        If None, it is set to (-1, 1)
    y_domain : tuple or None, optional
        domain of the y independent variable
        If None, it is set to (-1, 1)
    x_window : tuple or None, optional
        range of the x independent variable
        If None, it is set to (-1, 1).
        Fitters will remap the x_domain to x_window.
    y_window : tuple or None, optional
        range of the y independent variable
        If None, it is set to (-1, 1).
        Fitters will remap the y_domain to y_window.
    **params : dict
        keyword: value pairs, representing parameter_name: value
    """

    n_inputs = 2
    n_outputs = 1

    _separable = False

    def __init__(self, degree, x_domain=None, y_domain=None,
                 x_window=None, y_window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super().__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

        self._default_domain_window = {
            'x_domain': (-1, 1),
            'y_domain': (-1, 1),
            'x_window': (-1, 1),
            'y_window': (-1, 1)
        }
        self.x_domain = x_domain or self._default_domain_window['x_domain']
        self.y_domain = y_domain or self._default_domain_window['y_domain']
        self.x_window = x_window or self._default_domain_window['x_window']
        self.y_window = y_window or self._default_domain_window['y_window']

    def prepare_inputs(self, x, y, **kwargs):
        inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)

        x, y = inputs
        return (x, y), broadcasted_shapes

    def evaluate(self, x, y, *coeffs):
        if self.x_domain is not None:
            x = poly_map_domain(x, self.x_domain, self.x_window)
        if self.y_domain is not None:
            y = poly_map_domain(y, self.y_domain, self.y_window)
        invcoeff = self.invlex_coeff(coeffs)
        result = self.multivariate_horner(x, y, invcoeff)

        # Special case for degree==0 to ensure that the shape of the output is
        # still as expected by the broadcasting rules, even though the x and y
        # inputs are not used in the evaluation
        if self.degree == 0:
            output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
            if output_shape:
                new_result = np.empty(output_shape)
                new_result[:] = result
                result = new_result

        return result

    def __repr__(self):
        return self._format_repr([self.degree],
                                 kwargs={'x_domain': self.x_domain,
                                         'y_domain': self.y_domain,
                                         'x_window': self.x_window,
                                         'y_window': self.y_window},
                                 defaults=self._default_domain_window)

    def __str__(self):
        return self._format_str([('Degree', self.degree),
                                 ('X_Domain', self.x_domain),
                                 ('Y_Domain', self.y_domain),
                                 ('X_Window', self.x_window),
                                 ('Y_Window', self.y_window)],
                                self._default_domain_window)

    def fit_deriv(self, x, y, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        *params
            throw-away parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.ndim == 2:
            x = x.flatten()
        if y.ndim == 2:
            y = y.flatten()
        if x.size != y.size:
            raise ValueError('Expected x and y to be of equal size')

        designx = x[:, None] ** np.arange(self.degree + 1)
        designy = y[:, None] ** np.arange(1, self.degree + 1)

        designmixed = []
        for i in range(1, self.degree):
            for j in range(1, self.degree):
                if i + j <= self.degree:
                    designmixed.append((x ** i) * (y ** j))
        designmixed = np.array(designmixed).T
        if designmixed.any():
            v = np.hstack([designx, designy, designmixed])
        else:
            v = np.hstack([designx, designy])
        return v

    def invlex_coeff(self, coeffs):
        invlex_coeffs = []
        lencoeff = range(self.degree + 1)
        for i in lencoeff:
            for j in lencoeff:
                if i + j <= self.degree:
                    name = f'c{j}_{i}'
                    coeff = coeffs[self.param_names.index(name)]
                    invlex_coeffs.append(coeff)
        return invlex_coeffs[::-1]

    def multivariate_horner(self, x, y, coeffs):
        """
        Multivariate Horner's scheme

        Parameters
        ----------
        x, y : array
        coeffs : array
            Coefficients in inverse lexical order.
        """
        alpha = self._invlex()
        r0 = coeffs[0]
        r1 = r0 * 0.0
        r2 = r0 * 0.0
        karr = np.diff(alpha, axis=0)

        for n in range(len(karr)):
            if karr[n, 1] != 0:
                r2 = y * (r0 + r1 + r2)
                r1 = np.zeros_like(coeffs[0], subok=False)
            else:
                r1 = x * (r0 + r1)
            r0 = coeffs[n + 1]

        return r0 + r1 + r2

    @property
    def input_units(self):
        if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
            return None
        return {self.inputs[0]: self.c0_0.unit / self.c1_0.unit,
                self.inputs[1]: self.c0_0.unit / self.c0_1.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        mapping = {}
        for i in range(self.degree + 1):
            for j in range(self.degree + 1):
                if i + j > 2:
                    continue
                par = getattr(self, f'c{i}_{j}')
                mapping[par.name] = (outputs_unit[self.outputs[0]] /
                                     inputs_unit[self.inputs[0]] ** i /
                                     inputs_unit[self.inputs[1]] ** j)
        return mapping

    @property
    def x_domain(self):
        return self._x_domain

    @x_domain.setter
    def x_domain(self, val):
        self._x_domain = _validate_domain_window(val)

    @property
    def y_domain(self):
        return self._y_domain

    @y_domain.setter
    def y_domain(self, val):
        self._y_domain = _validate_domain_window(val)

    @property
    def x_window(self):
        return self._x_window

    @x_window.setter
    def x_window(self, val):
        self._x_window = _validate_domain_window(val)

    @property
    def y_window(self):
        return self._y_window

    @y_window.setter
    def y_window(self, val):
        self._y_window = _validate_domain_window(val)


class Chebyshev2D(OrthoPolynomialBase):
    r"""
    Bivariate Chebyshev series.

    It is defined as

    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y)

    where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the
    first kind.

    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and
    ``y_window`` see :ref:`Notes regarding usage of domain and window
    <domain-window-note>`.
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x and/or y - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ _separable = False def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def _fcache(self, x, y): """ Calculate the individual Chebyshev functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Chebyshev polynomials: .. math:: T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._chebderiv1d(x, self.x_degree + 1).T y_deriv = self._chebderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _chebderiv1d(self, x, deg): """ Derivative of 1D Chebyshev series """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x for i in range(2, deg + 1): d[i] = d[i - 1] * x2 - d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre2D(OrthoPolynomialBase): r""" Bivariate Legendre series. Defined as: .. math:: P_{n_m}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x ) L_m(y) where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- Model formula: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_{i}`` is the corresponding Legendre polynomial. This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ _separable = False def __init__(self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def _fcache(self, x, y): """ Calculate the individual Legendre functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]) / n) for n in range(2, y_terms): kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] - (n - 1) * kfunc[n + x_terms - 2]) / (n) return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Legendre polynomials: Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._legendderiv1d(x, self.x_degree + 1).T y_deriv = self._legendderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _legendderiv1d(self, x, deg): """Derivative of 1D Legendre polynomial""" x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1,) + x.shape, dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: d[1] = x for i in range(2, deg + 1): d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i return np.rollaxis(d, 0, d.ndim) class _SIP1D(PolynomialBase): """ This implements the Simple Imaging Polynomial Model (SIP) in 1D. It's unlikely it will be used in 1D so this class is private and SIP should be used instead. 
""" n_inputs = 2 n_outputs = 1 _separable = False def __init__(self, order, coeff_prefix, n_models=None, model_set_axis=None, name=None, meta=None, **params): self.order = order self.coeff_prefix = coeff_prefix self._param_names = self._generate_coeff_names(coeff_prefix) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = \ Parameter(param_name, default=np.zeros(minshape)) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params) def __repr__(self): return self._format_repr(args=[self.order, self.coeff_prefix]) def __str__(self): return self._format_str( [('Order', self.order), ('Coeff. Prefix', self.coeff_prefix)]) def evaluate(self, x, y, *coeffs): # TODO: Rewrite this so that it uses a simpler method of determining # the matrix based on the number of given coefficients. mcoef = self._coeff_matrix(self.coeff_prefix, coeffs) return self._eval_sip(x, y, mcoef) def get_num_coeff(self, ndim): """ Return the number of coefficients in one param set """ if self.order < 2 or self.order > 9: raise ValueError("Degree of polynomial must be 2< deg < 9") nmixed = comb(self.order, ndim) # remove 3 terms because SIP deg >= 2 numc = self.order * ndim + nmixed - 2 return numc def _generate_coeff_names(self, coeff_prefix): names = [] for i in range(2, self.order + 1): names.append(f'{coeff_prefix}_{i}_{0}') for i in range(2, self.order + 1): names.append(f'{coeff_prefix}_{0}_{i}') for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: names.append(f'{coeff_prefix}_{i}_{j}') return tuple(names) def _coeff_matrix(self, coeff_prefix, coeffs): mat = np.zeros((self.order + 1, self.order + 1)) for i in range(2, self.order + 1): attr = f'{coeff_prefix}_{i}_{0}' mat[i, 0] = coeffs[self.param_names.index(attr)] for i in range(2, self.order + 1): attr = f'{coeff_prefix}_{0}_{i}' mat[0, i] = coeffs[self.param_names.index(attr)] for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: attr = f'{coeff_prefix}_{i}_{j}' mat[i, j] = coeffs[self.param_names.index(attr)] return mat def _eval_sip(self, x, y, coef): x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) if self.coeff_prefix == 'A': result = np.zeros(x.shape) else: result = np.zeros(y.shape) for i in range(coef.shape[0]): for j in range(coef.shape[1]): if 1 < i + j < self.order + 1: result = result + coef[i, j] * x ** i * y ** j return result class SIP(Model): """ Simple Imaging Polynomial (SIP) model. The SIP convention is used to represent distortions in FITS image headers. See [1]_ for a description of the SIP convention. Parameters ---------- crpix : list or (2,) ndarray CRPIX values a_order : int SIP polynomial order for first axis b_order : int SIP order for second axis a_coeff : dict SIP coefficients for first axis b_coeff : dict SIP coefficients for the second axis ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform References ---------- .. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 
347, 2005 <https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_ """ n_inputs = 2 n_outputs = 2 _separable = False def __init__(self, crpix, a_order, b_order, a_coeff={}, b_coeff={}, ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None): self._crpix = crpix self._a_order = a_order self._b_order = b_order self._a_coeff = a_coeff self._b_coeff = b_coeff self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff self.shift_a = Shift(-crpix[0]) self.shift_b = Shift(-crpix[1]) self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models, model_set_axis=model_set_axis, **a_coeff) self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models, model_set_axis=model_set_axis, **b_coeff) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta) self._inputs = ("u", "v") self._outputs = ("x", "y") def __repr__(self): return '<{}({!r})>'.format(self.__class__.__name__, [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]) def __str__(self): parts = [f'Model: {self.__class__.__name__}'] for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]: parts.append(indent(str(model), width=4)) parts.append('') return '\n'.join(parts) @property def inverse(self): if (self._ap_order is not None and self._bp_order is not None): return InverseSIP(self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff) else: raise NotImplementedError("SIP inverse coefficients are not available.") def evaluate(self, x, y): u = self.shift_a.evaluate(x, *self.shift_a.param_sets) v = self.shift_b.evaluate(y, *self.shift_b.param_sets) f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets) g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets) return f, g class InverseSIP(Model): """ Inverse Simple Imaging Polynomial Parameters ---------- ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform """ n_inputs = 2 n_outputs = 2 _separable = False def __init__(self, ap_order, bp_order, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None): self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff # define the 0th term in order to use Polynomial2D ap_coeff.setdefault('AP_0_0', 0) bp_coeff.setdefault('BP_0_0', 0) ap_coeff_params = dict((k.replace('AP_', 'c'), v) for k, v in ap_coeff.items()) bp_coeff_params = dict((k.replace('BP_', 'c'), v) for k, v in bp_coeff.items()) self.sip1d_ap = Polynomial2D(degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params) self.sip1d_bp = Polynomial2D(degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params) super().__init__(n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta) def __repr__(self): return f'<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>' def __str__(self): parts = [f'Model: {self.__class__.__name__}'] for model in [self.sip1d_ap, self.sip1d_bp]: parts.append(indent(str(model), width=4)) parts.append('') return '\n'.join(parts) def evaluate(self, x, y): x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets) y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets) return x1, y1
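

# A minimal usage sketch of the 2D polynomial and SIP models defined above.
# The degrees, coefficient values, and pixel coordinates are arbitrary
# illustrative inputs, not values mandated by the SIP convention.
if __name__ == '__main__':
    # Bivariate quadratic p(x, y) = 1 + 2*x + 3*x*y, evaluated at (2, 3):
    # 1 + 2*2 + 3*2*3 = 23.0
    poly = Polynomial2D(degree=2, c0_0=1, c1_0=2, c1_1=3)
    print(poly(2.0, 3.0))

    # A second-order SIP distortion with one non-zero coefficient per axis.
    # Calling the model returns the (f, g) distortion terms evaluated at
    # pixel coordinates relative to CRPIX; here u = 1124 - 1024 = 100 and
    # v = 0, so f = A_2_0 * u**2 = 0.05 and g = 0.0.
    sip = SIP(crpix=[1024, 1024], a_order=2, b_order=2,
              a_coeff={'A_2_0': 5.0e-6}, b_coeff={'B_0_2': 3.0e-6})
    print(sip(1124.0, 1024.0))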
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tabular models. Tabular models of any dimension can be created using `tabular_model`. For convenience `Tabular1D` and `Tabular2D` are provided. Examples -------- >>> table = np.array([[ 3., 0., 0.], ... [ 0., 2., 0.], ... [ 0., 0., 0.]]) >>> points = ([1, 2, 3], [1, 2, 3]) >>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False, ... fill_value=None, method='nearest') """ # pylint: disable=invalid-name import abc import numpy as np from astropy import units as u from .core import Model try: from scipy.interpolate import interpn has_scipy = True except ImportError: has_scipy = False __all__ = ['tabular_model', 'Tabular1D', 'Tabular2D'] __doctest_requires__ = {('tabular_model'): ['scipy']} class _Tabular(Model): """ Returns an interpolated lookup table value. Parameters ---------- points : tuple of ndarray of float, optional The points defining the regular grid in n dimensions. ndarray must have shapes (m1, ), ..., (mn, ), lookup_table : array-like The data on a regular grid in n dimensions. Must have shapes (m1, ..., mn, ...) method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float or `~astropy.units.Quantity`, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". If Quantity is given, it will be converted to the unit of ``lookup_table``, if applicable. Returns ------- value : ndarray Interpolated values at input coordinates. Raises ------ ImportError Scipy is not installed. Notes ----- Uses `scipy.interpolate.interpn`. 
""" linear = False fittable = False standard_broadcasting = False _is_dynamic = True _id = 0 def __init__(self, points=None, lookup_table=None, method='linear', bounds_error=True, fill_value=np.nan, **kwargs): n_models = kwargs.get('n_models', 1) if n_models > 1: raise NotImplementedError('Only n_models=1 is supported.') super().__init__(**kwargs) self.outputs = ("y",) if lookup_table is None: raise ValueError('Must provide a lookup table.') if not isinstance(lookup_table, u.Quantity): lookup_table = np.asarray(lookup_table) if self.lookup_table.ndim != lookup_table.ndim: raise ValueError("lookup_table should be an array with " "{} dimensions.".format(self.lookup_table.ndim)) if points is None: points = tuple(np.arange(x, dtype=float) for x in lookup_table.shape) else: if lookup_table.ndim == 1 and not isinstance(points, tuple): points = (points,) npts = len(points) if npts != lookup_table.ndim: raise ValueError( "Expected grid points in " "{} directions, got {}.".format(lookup_table.ndim, npts)) if (npts > 1 and isinstance(points[0], u.Quantity) and len(set([getattr(p, 'unit', None) for p in points])) > 1): raise ValueError('points must all have the same unit.') if isinstance(fill_value, u.Quantity): if not isinstance(lookup_table, u.Quantity): raise ValueError('fill value is in {} but expected to be ' 'unitless.'.format(fill_value.unit)) fill_value = fill_value.to(lookup_table.unit).value self.points = points self.lookup_table = lookup_table self.bounds_error = bounds_error self.method = method self.fill_value = fill_value def __repr__(self): return "<{}(points={}, lookup_table={})>".format( self.__class__.__name__, self.points, self.lookup_table) def __str__(self): default_keywords = [ ('Model', self.__class__.__name__), ('Name', self.name), ('N_inputs', self.n_inputs), ('N_outputs', self.n_outputs), ('Parameters', ""), (' points', self.points), (' lookup_table', self.lookup_table), (' method', self.method), (' fill_value', self.fill_value), (' bounds_error', self.bounds_error) ] parts = [f'{keyword}: {value}' for keyword, value in default_keywords if value is not None] return '\n'.join(parts) @property def input_units(self): pts = self.points[0] if not isinstance(pts, u.Quantity): return None return dict([(x, pts.unit) for x in self.inputs]) @property def return_units(self): if not isinstance(self.lookup_table, u.Quantity): return None return {self.outputs[0]: self.lookup_table.unit} @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(points_low, points_high)``. Examples -------- >>> from astropy.modeling.models import Tabular1D, Tabular2D >>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30]) >>> t1.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) } model=Tabular1D(inputs=('x',)) order='C' ) >>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]], ... lookup_table=[[10, 20, 30], [20, 30, 40]]) >>> t2.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) y: Interval(lower=2, upper=4) } model=Tabular2D(inputs=('x', 'y')) order='C' ) """ bbox = [(min(p), max(p)) for p in self.points][::-1] if len(bbox) == 1: bbox = bbox[0] return bbox def evaluate(self, *inputs): """ Return the interpolated values at the input coordinates. Parameters ---------- inputs : list of scalar or list of ndarray Input coordinates. The number of inputs must be equal to the dimensions of the lookup table. 
""" inputs = np.broadcast_arrays(*inputs) shape = inputs[0].shape inputs = [inp.flatten() for inp in inputs[: self.n_inputs]] inputs = np.array(inputs).T if not has_scipy: # pragma: no cover raise ImportError("Tabular model requires scipy.") result = interpn(self.points, self.lookup_table, inputs, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) # return_units not respected when points has no units if (isinstance(self.lookup_table, u.Quantity) and not isinstance(self.points[0], u.Quantity)): result = result * self.lookup_table.unit if self.n_outputs == 1: result = result.reshape(shape) else: result = [r.reshape(shape) for r in result] return result @property def inverse(self): if self.n_inputs == 1: # If the wavelength array is descending instead of ascending, both # points and lookup_table need to be reversed in the inverse transform # for scipy.interpolate to work properly if np.all(np.diff(self.lookup_table) > 0): # ascending case points = self.lookup_table lookup_table = self.points[0] elif np.all(np.diff(self.lookup_table) < 0): # descending case, reverse order points = self.lookup_table[::-1] lookup_table = self.points[0][::-1] else: # equal-valued or double-valued lookup_table raise NotImplementedError return Tabular1D(points=points, lookup_table=lookup_table, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) raise NotImplementedError("An analytical inverse transform " "has not been implemented for this model.") def tabular_model(dim, name=None): """ Make a ``Tabular`` model where ``n_inputs`` is based on the dimension of the lookup_table. This model has to be further initialized and when evaluated returns the interpolated values. Parameters ---------- dim : int Dimensions of the lookup table. name : str Name for the class. Examples -------- >>> table = np.array([[3., 0., 0.], ... [0., 2., 0.], ... [0., 0., 0.]]) >>> tab = tabular_model(2, name='Tabular2D') >>> print(tab) <class 'astropy.modeling.tabular.Tabular2D'> Name: Tabular2D N_inputs: 2 N_outputs: 1 >>> points = ([1, 2, 3], [1, 2, 3]) Setting fill_value to None, allows extrapolation. >>> m = tab(points, lookup_table=table, name='my_table', ... bounds_error=False, fill_value=None, method='nearest') >>> xinterp = [0, 1, 1.5, 2.72, 3.14] >>> m(xinterp, xinterp) # doctest: +FLOAT_CMP array([3., 3., 3., 0., 0.]) """ if dim < 1: raise ValueError('Lookup table must have at least one dimension.') table = np.zeros([2] * dim) members = {'lookup_table': table, 'n_inputs': dim, 'n_outputs': 1} if dim == 1: members['_separable'] = True else: members['_separable'] = False if name is None: model_id = _Tabular._id _Tabular._id += 1 name = f'Tabular{model_id}' model_class = type(str(name), (_Tabular,), members) model_class.__module__ = 'astropy.modeling.tabular' return model_class Tabular1D = tabular_model(1, name='Tabular1D') Tabular2D = tabular_model(2, name='Tabular2D') _tab_docs = """ method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". 
    Returns
    -------
    value : ndarray
        Interpolated values at input coordinates.

    Raises
    ------
    ImportError
        Scipy is not installed.

    Notes
    -----
    Uses `scipy.interpolate.interpn`.
"""

Tabular1D.__doc__ = """
    Tabular model in 1D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : array-like of float, ndim=1
        The points defining the regular grid in one dimension.
    lookup_table : array-like, ndim=1
        The data in one dimension.
""" + _tab_docs

Tabular2D.__doc__ = """
    Tabular model in 2D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : tuple of ndarray of float, optional
        The points defining the regular grid in two dimensions;
        two arrays with shapes (m1,) and (m2,).
    lookup_table : array-like
        The data on a regular grid in two dimensions.  Shape (m1, m2).
""" + _tab_docs
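

# A minimal usage sketch of ``Tabular1D`` and its analytic inverse.  Running
# it requires scipy; the table values are arbitrary illustrative inputs.
if __name__ == '__main__':
    points = np.array([1., 2., 3., 4.])
    values = np.array([10., 20., 30., 40.])

    tab = Tabular1D(points=points, lookup_table=values)
    print(tab(2.5))           # linear interpolation -> 25.0

    # The lookup table is strictly increasing, so the analytic inverse
    # simply swaps points and lookup_table.
    print(tab.inverse(25.0))  # -> 2.5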
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member

import warnings

import numpy as np

from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning

from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter

__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]


class BlackBody(Fittable1DModel):
    """
    Blackbody model using the Planck function.

    Parameters
    ----------
    temperature : `~astropy.units.Quantity` ['temperature']
        Blackbody temperature.

    scale : float or `~astropy.units.Quantity` ['dimensionless']
        Scale factor.  If dimensionless, input units will be assumed to be
        Hz and output units erg / (cm ** 2 * s * Hz * sr).  If not
        dimensionless, it must be equivalent to either
        erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
        in which case the result will be returned in the requested units
        and the scale will be stripped of units (with the float value
        applied).

    Notes
    -----
    Model formula:

        .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{\\exp(h \\nu / k T) - 1}

    Examples
    --------
    >>> from astropy.modeling import models
    >>> from astropy import units as u
    >>> bb = models.BlackBody(temperature=5000*u.K)
    >>> bb(6000 * u.AA)  # doctest: +FLOAT_CMP
    <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import BlackBody
        from astropy import units as u
        from astropy.visualization import quantity_support

        bb = BlackBody(temperature=5778*u.K)
        wav = np.arange(1000, 110000) * u.AA
        flux = bb(wav)

        with quantity_support():
            plt.figure()
            plt.semilogx(wav, flux)
            plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
            plt.show()
    """

    # We parametrize this model with a temperature and a scale.
    temperature = Parameter(default=5000.0, min=0, unit=u.K,
                            description="Blackbody temperature")
    scale = Parameter(default=1.0, min=0, description="Scale factor")

    # We allow values without units to be passed when evaluating the model, and
    # in this case the input x values are assumed to be frequencies in Hz or
    # wavelengths in AA (depending on the choice of output units controlled by
    # units on scale and stored in self._output_units during init).
    _input_units_allow_dimensionless = True

    # We enable the spectral equivalency by default for the spectral axis
    input_units_equivalencies = {'x': u.spectral()}

    # Store the native units returned by the B_nu equation
    _native_units = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)

    # Store the base native output units.  If scale is not dimensionless, it
    # must be equivalent to one of these.  If equivalent to SLAM, then
    # input_units will expect AA for 'x', otherwise Hz.
    _native_output_units = {'SNU': u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr),
                            'SLAM': u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)}

    def __init__(self, *args, **kwargs):
        scale = kwargs.get('scale', None)

        # Support scale with non-dimensionless unit by stripping the unit and
        # storing as self._output_units.
if hasattr(scale, 'unit') and not scale.unit.is_equivalent(u.dimensionless_unscaled): output_units = scale.unit if not output_units.is_equivalent(self._native_units, u.spectral_density(1*u.AA)): raise ValueError(f"scale units not dimensionless or in surface brightness: {output_units}") kwargs['scale'] = scale.value self._output_units = output_units else: self._output_units = self._native_units return super().__init__(*args, **kwargs) def evaluate(self, x, temperature, scale): """Evaluate the model. Parameters ---------- x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency'] Frequency at which to compute the blackbody. If no units are given, this defaults to Hz (or AA if `scale` was initialized with units equivalent to erg / (cm ** 2 * s * AA * sr)). temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity` Temperature of the blackbody. If no units are given, this defaults to Kelvin. scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless'] Desired scale for the blackbody. Returns ------- y : number or ndarray Blackbody spectrum. The units are determined from the units of ``scale``. .. note:: Use `numpy.errstate` to suppress Numpy warnings, if desired. .. warning:: Output values might contain ``nan`` and ``inf``. Raises ------ ValueError Invalid temperature. ZeroDivisionError Wavelength is zero (when converting to frequency). """ if not isinstance(temperature, u.Quantity): in_temp = u.Quantity(temperature, u.K) else: in_temp = temperature if not isinstance(x, u.Quantity): # then we assume it has input_units which depends on the # requested output units (either Hz or AA) in_x = u.Quantity(x, self.input_units['x']) else: in_x = x # Convert to units for calculations, also force double precision with u.add_enabled_equivalencies(u.spectral() + u.temperature()): freq = u.Quantity(in_x, u.Hz, dtype=np.float64) temp = u.Quantity(in_temp, u.K) # Check if input values are physically possible if np.any(temp < 0): raise ValueError(f"Temperature should be positive: {temp}") if not np.all(np.isfinite(freq)) or np.any(freq <= 0): warnings.warn( "Input contains invalid wavelength/frequency value(s)", AstropyUserWarning, ) log_boltz = const.h * freq / (const.k_B * temp) boltzm1 = np.expm1(log_boltz) # Calculate blackbody flux bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled if not hasattr(scale, 'unit'): # during fitting, scale will be passed without units # but we still need to convert from the input dimensionless # to dimensionless unscaled scale = scale * self.scale.unit scale = scale.to(u.dimensionless_unscaled).value # NOTE: scale is already stripped of any input units y = scale * bb_nu.to(self._output_units, u.spectral_density(freq)) # If the temperature parameter has no unit, we should return a unitless # value. This occurs for instance during fitting, since we drop the # units temporarily. if hasattr(temperature, "unit"): return y return y.value @property def input_units(self): # The input units are those of the 'x' value, which will depend on the # units compatible with the expected output units. 
        if self._output_units.is_equivalent(self._native_output_units['SNU']):
            return {self.inputs[0]: u.Hz}
        else:
            # only other option is equivalent with SLAM
            return {self.inputs[0]: u.AA}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {"temperature": u.K}

    @property
    def bolometric_flux(self):
        """Bolometric flux."""
        if self.scale.unit is not None:
            # Will be dimensionless at this point, but may not be
            # dimensionless_unscaled
            scale = self.scale.quantity.to(u.dimensionless_unscaled)
        else:
            scale = self.scale.value

        # bolometric flux in the native units of the planck function
        native_bolflux = (
            scale * const.sigma_sb * self.temperature ** 4 / np.pi
        )

        # return in more "astro" units
        return native_bolflux.to(u.erg / (u.cm ** 2 * u.s))

    @property
    def lambda_max(self):
        """Peak wavelength when the curve is expressed as power density."""
        return const.b_wien / self.temperature

    @property
    def nu_max(self):
        """Peak frequency when the curve is expressed as power density."""
        return 2.8214391 * const.k_B * self.temperature / const.h


class Drude1D(Fittable1DModel):
    """
    Drude model based on the behavior of electrons in materials
    (especially metals).

    Parameters
    ----------
    amplitude : float
        Peak value
    x_0 : float
        Position of the peak
    fwhm : float
        Full width at half maximum

    Notes
    -----
    Model formula:

    .. math::

        f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}

    Examples
    --------

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import Drude1D

        fig, ax = plt.subplots()

        # generate the curves and plot them
        x = np.arange(7.5, 12.5, 0.1)

        dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
        ax.plot(x, dmodel(x))

        ax.set_xlabel('x')
        ax.set_ylabel('F(x)')

        plt.show()
    """

    amplitude = Parameter(default=1.0, description="Peak Value")
    x_0 = Parameter(default=1.0, description="Position of the peak")
    fwhm = Parameter(default=1.0, description="Full width at half maximum")

    @staticmethod
    def evaluate(x, amplitude, x_0, fwhm):
        """
        One dimensional Drude model function.
        """
        return (
            amplitude
            * ((fwhm / x_0) ** 2)
            / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
        )

    @staticmethod
    def fit_deriv(x, amplitude, x_0, fwhm):
        """
        Drude1D model function derivatives.
        """
        d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2
                                           + (fwhm / x_0) ** 2)
        d_x_0 = (
            -2
            * amplitude
            * d_amplitude
            * (
                (1 / x_0)
                + d_amplitude
                * (x_0 ** 2 / fwhm ** 2)
                * (
                    (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
                    - (2 * fwhm ** 2 / x_0 ** 3)
                )
            )
        )
        d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
        return [d_amplitude, d_x_0, d_fwhm]

    @property
    def input_units(self):
        if self.x_0.unit is None:
            return None
        return {self.inputs[0]: self.x_0.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {
            "x_0": inputs_unit[self.inputs[0]],
            "fwhm": inputs_unit[self.inputs[0]],
            "amplitude": outputs_unit[self.outputs[0]],
        }

    @property
    def return_units(self):
        if self.amplitude.unit is None:
            return None
        return {self.outputs[0]: self.amplitude.unit}

    @x_0.validator
    def x_0(self, val):
        """Ensure `x_0` is not 0."""
        if np.any(val == 0):
            raise InputParameterError("0 is not an allowed value for x_0")

    def bounding_box(self, factor=50):
        """Tuple defining the default ``bounding_box`` limits,
        ``(x_low, x_high)``.

        Parameters
        ----------
        factor : float
            The multiple of FWHM used to define the limits.
        """
        x0 = self.x_0
        dx = factor * self.fwhm

        return (x0 - dx, x0 + dx)


class Plummer1D(Fittable1DModel):
    r"""One dimensional Plummer density profile model.
Parameters ---------- mass : float Total mass of cluster. r_plum : float Scale parameter which sets the size of the cluster core. Notes ----- Model formula: .. math:: \rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2} References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P """ mass = Parameter(default=1.0, description="Total mass of cluster") r_plum = Parameter(default=1.0, description="Scale parameter which sets the size of the cluster core") @staticmethod def evaluate(x, mass, r_plum): """ Evaluate plummer density profile model. """ return (3*mass)/(4 * np.pi * r_plum**3) * (1+(x/r_plum)**2)**(-5/2) @staticmethod def fit_deriv(x, mass, r_plum): """ Plummer1D model derivatives. """ d_mass = 3 / ((4*np.pi*r_plum**3) * (((x/r_plum)**2 + 1)**(5/2))) d_r_plum = (6*mass*x**2-9*mass*r_plum**2) / ((4*np.pi * r_plum**6) * (1+(x/r_plum)**2)**(7/2)) return [d_mass, d_r_plum] @property def input_units(self): if self.mass.unit is None and self.r_plum.unit is None: return None else: return {self.inputs[0]: self.r_plum.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mass': outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3, 'r_plum': inputs_unit[self.inputs[0]]} class NFW(Fittable1DModel): r""" Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter. Parameters ---------- mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. massfactor : tuple or str Mass overdensity factor and type for provided profiles: Tuple version: ("virial",) : virial radius ("critical", N) : radius where density is N times that of the critical density ("mean", N) : radius where density is N times that of the mean density String version: "virial" : virial radius "Nc" : radius where density is N times that of the critical density (e.g. "200c") "Nm" : radius where density is N times that of the mean density (e.g. "500m") cosmo : :class:`~astropy.cosmology.Cosmology` Background cosmology for density calculation. If None, the default cosmology will be used. Notes ----- Model formula: .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2} References ---------- .. [1] https://arxiv.org/pdf/astro-ph/9508025 .. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile .. [3] https://en.wikipedia.org/wiki/Virial_mass """ # Model Parameters # NFW Profile mass mass = Parameter(default=1.0, min=1.0, unit=u.M_sun, description="Peak mass within specified overdensity radius") # NFW profile concentration concentration = Parameter(default=1.0, min=1.0, description="Concentration") # NFW Profile redshift redshift = Parameter(default=0.0, min=0.0, description="Redshift") # We allow values without units to be passed when evaluating the model, and # in this case the input r values are assumed to be lengths / positions in kpc. 
_input_units_allow_dimensionless = True def __init__(self, mass=u.Quantity(mass.default, mass.unit), concentration=concentration.default, redshift=redshift.default, massfactor=("critical", 200), cosmo=None, **kwargs): # Set default cosmology if cosmo is None: # LOCAL from astropy.cosmology import default_cosmology cosmo = default_cosmology.get() # Set mass overdensity type and factor self._density_delta(massfactor, cosmo, redshift) # Establish mass units for density calculation (default solar masses) if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Obtain scale radius self._radius_s(mass, concentration) # Obtain scale density self._density_s(mass, concentration) super().__init__(mass=in_mass, concentration=concentration, redshift=redshift, **kwargs) def evaluate(self, r, mass, concentration, redshift): """ One dimensional NFW profile function Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of density to be calculated for the NFW profile. mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. Returns ------- density : float or `~astropy.units.Quantity` ['density'] NFW profile mass density at location ``r``. The density units are: [``mass`` / ``r`` ^3] Notes ----- .. warning:: Output values might contain ``nan`` and ``inf``. """ # Create radial version of input with dimension if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Define reduced radius (r / r_{\\rm s}) # also update scale radius radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit) # Density distribution # \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2} # also update scale density density = self._density_s(mass, concentration) / (radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2) if hasattr(mass, "unit"): return density else: return density.value def _density_delta(self, massfactor, cosmo, redshift): """ Calculate density delta. 
""" # Set mass overdensity type and factor if isinstance(massfactor, tuple): # Tuple options # ("virial") : virial radius # ("critical", N) : radius where density is N that of the critical density # ("mean", N) : radius where density is N that of the mean density if massfactor[0].lower() == "virial": # Virial Mass delta = None masstype = massfactor[0].lower() elif massfactor[0].lower() == "critical": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = 'c' elif massfactor[0].lower() == "mean": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = 'm' else: raise ValueError("Massfactor '" + str(massfactor[0]) + "' not one of 'critical', " "'mean', or 'virial'") else: try: # String options # virial : virial radius # Nc : radius where density is N that of the critical density # Nm : radius where density is N that of the mean density if massfactor.lower() == "virial": # Virial Mass delta = None masstype = massfactor.lower() elif massfactor[-1].lower() == 'c' or massfactor[-1].lower() == 'm': # Critical or Mean Overdensity Mass delta = float(massfactor[0:-1]) masstype = massfactor[-1].lower() else: raise ValueError("Massfactor " + str(massfactor) + " string not of the form " "'#m', '#c', or 'virial'") except (AttributeError, TypeError): raise TypeError("Massfactor " + str( massfactor) + " not a tuple or string") # Set density from masstype specification if masstype == "virial": Om_c = cosmo.Om(redshift) - 1.0 d_c = 18.0 * np.pi ** 2 + 82.0 * Om_c - 39.0 * Om_c ** 2 self.density_delta = d_c * cosmo.critical_density(redshift) elif masstype == 'c': self.density_delta = delta * cosmo.critical_density(redshift) elif masstype == 'm': self.density_delta = delta * cosmo.critical_density(redshift) * cosmo.Om(redshift) return self.density_delta @staticmethod def A_NFW(y): r""" Dimensionless volume integral of the NFW profile, used as an intermediate step in some calculations for this model. Notes ----- Model formula: .. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}] """ return np.log(1.0 + y) - (y / (1.0 + y)) def _density_s(self, mass, concentration): """ Calculate scale density of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Calculate scale density # M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right]. self.density_s = in_mass / (4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 * self.A_NFW(concentration)) return self.density_s @property def rho_scale(self): r""" Scale density of the NFW profile. Often written in the literature as :math:`\rho_s` """ return self.density_s def _radius_s(self, mass, concentration): """ Calculate scale radius of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Delta Mass is related to delta radius by # M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c} # And delta radius is related to the NFW scale radius by # c = R / r_{\\rm s} self.radius_s = (((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** ( 1.0 / 3.0)) / concentration # Set radial units to kiloparsec by default (unit will be rescaled by units of radius # in evaluate) return self.radius_s.to(u.kpc) @property def r_s(self): """ Scale radius of the NFW profile. """ return self.radius_s @property def r_virial(self): """ Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.). 
""" return self.r_s * self.concentration @property def r_max(self): """ Radius of maximum circular velocity. """ return self.r_s * 2.16258 @property def v_max(self): """ Maximum circular velocity. """ return self.circular_velocity(self.r_max) def circular_velocity(self, r): r""" Circular velocities of the NFW profile. Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of velocity to be calculated for the NFW profile. Returns ------- velocity : float or `~astropy.units.Quantity` ['speed'] NFW profile circular velocity at location ``r``. The velocity units are: [km / s] Notes ----- Model formula: .. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} .. math:: x = r/r_s .. warning:: Output values might contain ``nan`` and ``inf``. """ # Enforce default units (if parameters are without units) if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Mass factor defined velocity (i.e. V200c for M200c, Rvir for Mvir) v_profile = np.sqrt(self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) / self.r_virial) # Define reduced radius (r / r_{\\rm s}) reduced_radius = in_r / self.r_virial.to(in_r.unit) # Circular velocity given by: # v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} # where x=r/r_{200} velocity = np.sqrt((v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) / (reduced_radius * self.A_NFW(self.concentration))) return velocity.to(u.km / u.s) @property def input_units(self): # The units for the 'r' variable should be a length (default kpc) return {self.inputs[0]: u.kpc} @property def return_units(self): # The units for the 'density' variable should be a matter density (default M_sun / kpc^3) if (self.mass.unit is None): return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3} else: return {self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'mass': u.M_sun, "concentration": None, "redshift": None}
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Power law model variants """ # pylint: disable=invalid-name import numpy as np from astropy.units import Quantity from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D', 'ExponentialCutoffPowerLaw1D', 'LogParabola1D'] class PowerLaw1D(Fittable1DModel): """ One dimensional power law model. Parameters ---------- amplitude : float Model amplitude at the reference point x_0 : float Reference point alpha : float Power law index See Also -------- BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. math:: f(x) = A (x / x_0) ^ {-\\alpha} """ amplitude = Parameter(default=1, description="Peak value at the reference point") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") @staticmethod def evaluate(x, amplitude, x_0, alpha): """One dimensional power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, alpha): """One dimensional power law derivative with respect to parameters""" xx = x / x_0 d_amplitude = xx ** (-alpha) d_x_0 = amplitude * alpha * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) return [d_amplitude, d_x_0, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class BrokenPowerLaw1D(Fittable1DModel): """ One dimensional power law model with a break. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for x < x_break. alpha_2 : float Power law index for x > x_break. See Also -------- PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1` for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``): .. math:: f(x) = \\left \\{ \\begin{array}{ll} A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\ A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\ \\end{array} \\right. 
""" amplitude = Parameter(default=1, description="Peak value at break point") x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=1, description="Power law index before break point") alpha_2 = Parameter(default=1, description="Power law index after break point") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law model function""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law derivative with respect to parameters""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break d_amplitude = xx ** (-alpha) d_x_break = amplitude * alpha * d_amplitude / x_break d_alpha = -amplitude * d_amplitude * np.log(xx) d_alpha_1 = np.where(x < x_break, d_alpha, 0) d_alpha_2 = np.where(x >= x_break, d_alpha, 0) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_break': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class SmoothlyBrokenPowerLaw1D(Fittable1DModel): """One dimensional smoothly broken power law model. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for ``x << x_break``. alpha_2 : float Power law index for ``x >> x_break``. delta : float Smoothness parameter. See Also -------- BrokenPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for ``x_break``, :math:`\\alpha_1` for ``alpha_1``, :math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for ``delta``): .. math:: f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1} \\left\\{ \\frac{1}{2} \\left[ 1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta} \\right] \\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta} The change of slope occurs between the values :math:`x_1` and :math:`x_2` such that: .. math:: \\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1} \\sim \\Delta At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the model is approximately a simple power law with index :math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two power laws are smoothly joined at values :math:`x_1 < x < x_2`, hence the :math:`\\Delta` parameter sets the "smoothness" of the slope change. The ``delta`` parameter is bounded to values greater than 1e-3 (corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid overflow errors. The ``amplitude`` parameter is bounded to positive values since this model is typically used to represent positive quantities. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling import models x = np.logspace(0.7, 2.3, 500) f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20, alpha_1=-2, alpha_2=2) plt.figure() plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2") f.delta = 0.5 plt.loglog(x, f(x), '--', label='delta=0.5') f.delta = 0.3 plt.loglog(x, f(x), '-.', label='delta=0.3') f.delta = 0.1 plt.loglog(x, f(x), label='delta=0.1') plt.axis([x.min(), x.max(), 0.1, 1.1]) plt.legend(loc='lower center') plt.grid(True) plt.show() """ amplitude = Parameter(default=1, min=0, description="Peak value at break point") x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=-2, description="Power law index before break point") alpha_2 = Parameter(default=2, description="Power law index after break point") delta = Parameter(default=1, min=1.e-3, description="Smoothness Parameter") @amplitude.validator def amplitude(self, value): if np.any(value <= 0): raise InputParameterError( "amplitude parameter must be > 0") @delta.validator def delta(self, value): if np.any(value < 0.001): raise InputParameterError( "delta parameter must be >= 0.001") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law model function""" # Pre-calculate `x/x_b` xx = x / x_break # Initialize the return value f = np.zeros_like(xx, subok=False) if isinstance(amplitude, Quantity): return_unit = amplitude.unit amplitude = amplitude.value else: return_unit = None # The quantity `t = (x / x_b)^(1 / delta)` can become quite # large. To avoid overflow errors we will start by calculating # its natural logarithm: logt = np.log(xx) / delta # When `t >> 1` or `t << 1` we don't actually need to compute # the `t` value since the main formula (see docstring) can be # significantly simplified by neglecting `1` or `t` # respectively. In the following we will check whether `t` is # much greater, much smaller, or comparable to 1 by comparing # the `logt` value with an appropriate threshold. threshold = 30 # corresponding to exp(30) ~ 1e13 i = logt > threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_2`. f[i] = amplitude * xx[i] ** (-alpha_2) \ / (2. ** ((alpha_1 - alpha_2) * delta)) i = logt < -threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_1`. f[i] = amplitude * xx[i] ** (-alpha_1) \ / (2. ** ((alpha_1 - alpha_2) * delta)) i = np.abs(logt) <= threshold if i.max(): # In this case the `t` value is "comparable" to 1, hence we # we will evaluate the whole formula. t = np.exp(logt[i]) r = (1. + t) / 2. f[i] = amplitude * xx[i] ** (-alpha_1) \ * r ** ((alpha_1 - alpha_2) * delta) if return_unit: return Quantity(f, unit=return_unit, copy=False) return f @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law derivative with respect to parameters""" # Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in # SmoothlyBrokenPowerLaw1D.evaluate) xx = x / x_break logt = np.log(xx) / delta # Initialize the return values f = np.zeros_like(xx) d_amplitude = np.zeros_like(xx) d_x_break = np.zeros_like(xx) d_alpha_1 = np.zeros_like(xx) d_alpha_2 = np.zeros_like(xx) d_delta = np.zeros_like(xx) threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate) i = logt > threshold if i.max(): f[i] = amplitude * xx[i] ** (-alpha_2) \ / (2. 
** ((alpha_1 - alpha_2) * delta)) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_2 / x_break d_alpha_1[i] = f[i] * (-delta * np.log(2)) d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2)) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = logt < -threshold if i.max(): f[i] = amplitude * xx[i] ** (-alpha_1) \ / (2. ** ((alpha_1 - alpha_2) * delta)) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_1 / x_break d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2)) d_alpha_2[i] = f[i] * delta * np.log(2) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = np.abs(logt) <= threshold if i.max(): t = np.exp(logt[i]) r = (1. + t) / 2. f[i] = amplitude * xx[i] ** (-alpha_1) \ * r ** ((alpha_1 - alpha_2) * delta) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r)) d_alpha_2[i] = f[i] * (-delta * np.log(r)) d_delta[i] = f[i] * (alpha_1 - alpha_2) \ * (np.log(r) - t / (1. + t) / delta * np.log(xx[i])) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_break': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class ExponentialCutoffPowerLaw1D(Fittable1DModel): """ One dimensional power law model with an exponential cutoff. Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index x_cutoff : float Cutoff point See Also -------- PowerLaw1D, BrokenPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff}) """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") x_cutoff = Parameter(default=1, description="Cutoff point") @staticmethod def evaluate(x, amplitude, x_0, alpha, x_cutoff): """One dimensional exponential cutoff power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff) @staticmethod def fit_deriv(x, amplitude, x_0, alpha, x_cutoff): """One dimensional exponential cutoff power law derivative with respect to parameters""" xx = x / x_0 xc = x / x_cutoff d_amplitude = xx ** (-alpha) * np.exp(-xc) d_x_0 = alpha * amplitude * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2 return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'x_cutoff': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]} class LogParabola1D(Fittable1DModel): """ One dimensional log parabola model (sometimes called curved power law). 
Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``): .. math:: f(x) = A \\left(\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}} """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") beta = Parameter(default=0, description="Power law curvature") @staticmethod def evaluate(x, amplitude, x_0, alpha, beta): """One dimensional log parabola model function""" xx = x / x_0 exponent = -alpha - beta * np.log(xx) return amplitude * xx ** exponent @staticmethod def fit_deriv(x, amplitude, x_0, alpha, beta): """One dimensional log parabola derivative with respect to parameters""" xx = x / x_0 log_xx = np.log(xx) exponent = -alpha - beta * log_xx d_amplitude = xx ** exponent d_beta = -amplitude * d_amplitude * log_xx ** 2 d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0) d_alpha = -amplitude * d_amplitude * log_xx return [d_amplitude, d_x_0, d_alpha, d_beta] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {'x_0': inputs_unit[self.inputs[0]], 'amplitude': outputs_unit[self.outputs[0]]}
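

# A minimal usage sketch of two of the power-law variants defined above.
# All parameter values are arbitrary illustrative inputs.
if __name__ == '__main__':
    pl = PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
    print(pl(2.0))  # 10 * (2/1)**(-2) = 2.5

    bpl = BrokenPowerLaw1D(amplitude=10.0, x_break=5.0,
                           alpha_1=1.0, alpha_2=3.0)
    # Index alpha_1 applies below the break and alpha_2 above it:
    print(bpl(2.0))   # 10 * (2/5)**(-1) = 25.0
    print(bpl(10.0))  # 10 * (10/5)**(-3) = 1.25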
a24c87fc4d7097b2728b59ba9d3a92c26a3f8daa882c504bf142d8bc95f68e19
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""

import codecs
import locale
import re
import math
import multiprocessing
import os
import struct
import sys
import threading
import time

# concurrent.futures imports moved inside functions using them to avoid
# import failure when running in pyodide/Emscripten

try:
    import fcntl
    import termios
    import signal
    _CAN_RESIZE_TERMINAL = True
except ImportError:
    _CAN_RESIZE_TERMINAL = False

from astropy import conf

from .misc import isiterable
from .decorators import classproperty


__all__ = [
    'isatty', 'color_print', 'human_time', 'human_file_size', 'ProgressBar',
    'Spinner', 'print_code_line', 'ProgressBarOrSpinner', 'terminal_size']

_DEFAULT_ENCODING = 'utf-8'


class _IPython:
    """Singleton class given access to IPython streams, etc."""

    @classproperty
    def get_ipython(cls):
        try:
            from IPython import get_ipython
        except ImportError:
            pass
        # If the import above failed, the name is unbound and this raises
        # NameError, which ``OutStream`` below catches.
        return get_ipython

    @classproperty
    def OutStream(cls):
        if not hasattr(cls, '_OutStream'):
            cls._OutStream = None
            try:
                cls.get_ipython()
            except NameError:
                return None

            try:
                from ipykernel.iostream import OutStream
            except ImportError:
                try:
                    from IPython.zmq.iostream import OutStream
                except ImportError:
                    from IPython import version_info
                    if version_info[0] >= 4:
                        return None

                    try:
                        from IPython.kernel.zmq.iostream import OutStream
                    except ImportError:
                        return None

            cls._OutStream = OutStream

        return cls._OutStream

    @classproperty
    def ipyio(cls):
        if not hasattr(cls, '_ipyio'):
            try:
                from IPython.utils import io
            except ImportError:
                cls._ipyio = None
            else:
                cls._ipyio = io
        return cls._ipyio

    @classmethod
    def get_stream(cls, stream):
        return getattr(cls.ipyio, stream)


def _get_stdout(stderr=False):
    """
    This utility function contains the logic to determine what streams to use
    by default for standard out/err.

    Typically this will just return `sys.stdout`, but it contains additional
    logic for use in IPython on Windows to determine the correct stream to use
    (usually ``IPython.util.io.stdout`` but only if sys.stdout is a TTY).
    """

    if stderr:
        stream = 'stderr'
    else:
        stream = 'stdout'

    sys_stream = getattr(sys, stream)
    return sys_stream


def isatty(file):
    """
    Returns `True` if ``file`` is a tty.

    Most built-in Python file-like objects have an `isatty` member,
    but some user-defined types may not, so this assumes those are not
    ttys.
    """
    if (multiprocessing.current_process().name != 'MainProcess' or
            threading.current_thread().name != 'MainThread'):
        return False

    if hasattr(file, 'isatty'):
        return file.isatty()

    if _IPython.OutStream is None or (not isinstance(file, _IPython.OutStream)):
        return False

    # File is an IPython OutStream. Check whether:
    # - File name is 'stdout'; or
    # - File wraps a Console
    if getattr(file, 'name', None) == 'stdout':
        return True

    if hasattr(file, 'stream'):
        # FIXME: pyreadline has had no new release since 2015, drop it when
        # IPython minversion is 5.x.
        # On Windows, in IPython 2 the standard I/O streams will wrap
        # pyreadline.Console objects if pyreadline is available; this should
        # be considered a TTY.
        try:
            from pyreadline.console import Console as PyreadlineConsole
        except ImportError:
            return False

        return isinstance(file.stream, PyreadlineConsole)

    return False


def terminal_size(file=None):
    """
    Returns a tuple (height, width) containing the height and width of
    the terminal.

    This function will look for the width and height in multiple areas
    before falling back on the width and height in astropy's
    configuration.
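
    Examples
    --------
    The returned values depend on the attached terminal, so the doctest
    is skipped::

        >>> from astropy.utils.console import terminal_size
        >>> lines, width = terminal_size()  # doctest: +SKIP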
""" if file is None: file = _get_stdout() try: s = struct.pack("HHHH", 0, 0, 0, 0) x = fcntl.ioctl(file, termios.TIOCGWINSZ, s) (lines, width, xpixels, ypixels) = struct.unpack("HHHH", x) if lines > 12: lines -= 6 if width > 10: width -= 1 if lines <= 0 or width <= 0: raise Exception('unable to get terminal size') return (lines, width) except Exception: try: # see if POSIX standard variables will work return (int(os.environ.get('LINES')), int(os.environ.get('COLUMNS'))) except TypeError: # fall back on configuration variables, or if not # set, (25, 80) lines = conf.max_lines width = conf.max_width if lines is None: lines = 25 if width is None: width = 80 return lines, width def _color_text(text, color): """ Returns a string wrapped in ANSI color codes for coloring the text in a terminal:: colored_text = color_text('Here is a message', 'blue') This won't actually effect the text until it is printed to the terminal. Parameters ---------- text : str The string to return, bounded by the color codes. color : str An ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). """ color_mapping = { 'black': '0;30', 'red': '0;31', 'green': '0;32', 'brown': '0;33', 'blue': '0;34', 'magenta': '0;35', 'cyan': '0;36', 'lightgrey': '0;37', 'default': '0;39', 'darkgrey': '1;30', 'lightred': '1;31', 'lightgreen': '1;32', 'yellow': '1;33', 'lightblue': '1;34', 'lightmagenta': '1;35', 'lightcyan': '1;36', 'white': '1;37'} if sys.platform == 'win32' and _IPython.OutStream is None: # On Windows do not colorize text unless in IPython return text color_code = color_mapping.get(color, '0;39') return f'\033[{color_code}m{text}\033[0m' def _decode_preferred_encoding(s): """Decode the supplied byte string using the preferred encoding for the locale (`locale.getpreferredencoding`) or, if the default encoding is invalid, fall back first on utf-8, then on latin-1 if the message cannot be decoded with utf-8. """ enc = locale.getpreferredencoding() try: try: return s.decode(enc) except LookupError: enc = _DEFAULT_ENCODING return s.decode(enc) except UnicodeDecodeError: return s.decode('latin-1') def _write_with_fallback(s, write, fileobj): """Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'. """ try: write(s) return write except UnicodeEncodeError: # Let's try the next approach... pass enc = locale.getpreferredencoding() try: Writer = codecs.getwriter(enc) except LookupError: Writer = codecs.getwriter(_DEFAULT_ENCODING) f = Writer(fileobj) write = f.write try: write(s) return write except UnicodeEncodeError: Writer = codecs.getwriter('latin-1') f = Writer(fileobj) write = f.write # If this doesn't work let the exception bubble up; I'm out of ideas write(s) return write def color_print(*args, end='\n', **kwargs): """ Prints colors and styles to the terminal uses ANSI escape sequences. :: color_print('This is the color ', 'default', 'GREEN', 'green') Parameters ---------- positional args : str The positional arguments come in pairs (*msg*, *color*), where *msg* is the string to display and *color* is the color to display it in. *color* is an ANSI terminal color name. 
Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). file : writable file-like, optional Where to write to. Defaults to `sys.stdout`. If file is not a tty (as determined by calling its `isatty` member, if one exists), no coloring will be included. end : str, optional The ending of the message. Defaults to ``\\n``. The end will be printed after resetting any color or font state. """ file = kwargs.get('file', _get_stdout()) write = file.write if isatty(file) and conf.use_color: for i in range(0, len(args), 2): msg = args[i] if i + 1 == len(args): color = '' else: color = args[i + 1] if color: msg = _color_text(msg, color) # Some file objects support writing unicode sensibly on some Python # versions; if this fails try creating a writer using the locale's # preferred encoding. If that fails too give up. write = _write_with_fallback(msg, write, file) write(end) else: for i in range(0, len(args), 2): msg = args[i] write(msg) write(end) def strip_ansi_codes(s): """ Remove ANSI color codes from the string. """ return re.sub('\033\\[([0-9]+)(;[0-9]+)*m', '', s) def human_time(seconds): """ Returns a human-friendly time string that is always exactly 6 characters long. Depending on the number of seconds given, can be one of:: 1w 3d 2d 4h 1h 5m 1m 4s 15s Will be in color if console coloring is turned on. Parameters ---------- seconds : int The number of seconds to represent Returns ------- time : str A human-friendly representation of the given number of seconds that is always exactly 6 characters. """ units = [ ('y', 60 * 60 * 24 * 7 * 52), ('w', 60 * 60 * 24 * 7), ('d', 60 * 60 * 24), ('h', 60 * 60), ('m', 60), ('s', 1), ] seconds = int(seconds) if seconds < 60: return f' {seconds:2d}s' for i in range(len(units) - 1): unit1, limit1 = units[i] unit2, limit2 = units[i + 1] if seconds >= limit1: return '{:2d}{}{:2d}{}'.format( seconds // limit1, unit1, (seconds % limit1) // limit2, unit2) return ' ~inf' def human_file_size(size): """ Returns a human-friendly string representing a file size that is 2-4 characters long. For example, depending on the number of bytes given, can be one of:: 256b 64k 1.1G Parameters ---------- size : int The size of the file (in bytes) Returns ------- size : str A human-friendly representation of the size of the file """ if hasattr(size, 'unit'): # Import units only if necessary because the import takes a # significant time [#4649] from astropy import units as u size = u.Quantity(size, u.byte).value suffixes = ' kMGTPEZY' if size == 0: num_scale = 0 else: num_scale = int(math.floor(math.log(size) / math.log(1000))) if num_scale > 7: suffix = '?' else: suffix = suffixes[num_scale] num_scale = int(math.pow(1000, num_scale)) value = size / num_scale str_value = str(value) if suffix == ' ': str_value = str_value[:str_value.index('.')] elif str_value[2] == '.': str_value = str_value[:2] else: str_value = str_value[:3] return f"{str_value:>3s}{suffix}" class _mapfunc(object): """ A function wrapper to support ProgressBar.map(). """ def __init__(self, func): self._func = func def __call__(self, i_arg): i, arg = i_arg return i, self._func(arg) class ProgressBar: """ A class to display a progress bar in the terminal. 
It is designed to be used either with the ``with`` statement:: with ProgressBar(len(items)) as bar: for item in enumerate(items): bar.update() or as a generator:: for item in ProgressBar(items): item.process() """ def __init__(self, total_or_items, ipython_widget=False, file=None): """ Parameters ---------- total_or_items : int or sequence If an int, the number of increments in the process being tracked. If a sequence, the items to iterate over. ipython_widget : bool, optional If `True`, the progress bar will display as an IPython notebook widget. file : writable file-like, optional The file to write the progress bar to. Defaults to `sys.stdout`. If ``file`` is not a tty (as determined by calling its `isatty` member, if any, or special case hacks to detect the IPython console), the progress bar will be completely silent. """ if file is None: file = _get_stdout() if not ipython_widget and not isatty(file): self.update = self._silent_update self._silent = True else: self._silent = False if isiterable(total_or_items): self._items = iter(total_or_items) self._total = len(total_or_items) else: try: self._total = int(total_or_items) except TypeError: raise TypeError("First argument must be int or sequence") else: self._items = iter(range(self._total)) self._file = file self._start_time = time.time() self._human_total = human_file_size(self._total) self._ipython_widget = ipython_widget self._signal_set = False if not ipython_widget: self._should_handle_resize = ( _CAN_RESIZE_TERMINAL and self._file.isatty()) self._handle_resize() if self._should_handle_resize: signal.signal(signal.SIGWINCH, self._handle_resize) self._signal_set = True self.update(0) def _handle_resize(self, signum=None, frame=None): terminal_width = terminal_size(self._file)[1] self._bar_length = terminal_width - 37 def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if not self._silent: if exc_type is None: self.update(self._total) self._file.write('\n') self._file.flush() if self._signal_set: signal.signal(signal.SIGWINCH, signal.SIG_DFL) def __iter__(self): return self def __next__(self): try: rv = next(self._items) except StopIteration: self.__exit__(None, None, None) raise else: self.update() return rv def update(self, value=None): """ Update progress bar via the console or notebook accordingly. """ # Update self.value if value is None: value = self._current_value + 1 self._current_value = value # Choose the appropriate environment if self._ipython_widget: self._update_ipython_widget(value) else: self._update_console(value) def _update_console(self, value=None): """ Update the progress bar to the given value (out of the total given to the constructor). 
""" if self._total == 0: frac = 1.0 else: frac = float(value) / float(self._total) file = self._file write = file.write if frac > 1: bar_fill = int(self._bar_length) else: bar_fill = int(float(self._bar_length) * frac) write('\r|') color_print('=' * bar_fill, 'blue', file=file, end='') if bar_fill < self._bar_length: color_print('>', 'green', file=file, end='') write('-' * (self._bar_length - bar_fill - 1)) write('|') if value >= self._total: t = time.time() - self._start_time prefix = ' ' elif value <= 0: t = None prefix = '' else: t = ((time.time() - self._start_time) * (1.0 - frac)) / frac prefix = ' ETA ' write(f' {human_file_size(value):>4s}/{self._human_total:>4s}') write(f' ({frac:>6.2%})') write(prefix) if t is not None: write(human_time(t)) self._file.flush() def _update_ipython_widget(self, value=None): """ Update the progress bar to the given value (out of a total given to the constructor). This method is for use in the IPython notebook 2+. """ # Create and display an empty progress bar widget, # if none exists. if not hasattr(self, '_widget'): # Import only if an IPython widget, i.e., widget in iPython NB from IPython import version_info if version_info[0] < 4: from IPython.html import widgets self._widget = widgets.FloatProgressWidget() else: _IPython.get_ipython() from ipywidgets import widgets self._widget = widgets.FloatProgress() from IPython.display import display display(self._widget) self._widget.value = 0 # Calculate percent completion, and update progress bar frac = (value/self._total) self._widget.value = frac * 100 self._widget.description = f' ({frac:>6.2%})' def _silent_update(self, value=None): pass @classmethod def map(cls, function, items, multiprocess=False, file=None, step=100, ipython_widget=False, multiprocessing_start_method=None): """Map function over items while displaying a progress bar with percentage complete. The map operation may run in arbitrary order on the items, but the results are returned in sequential order. :: def work(i): print(i) ProgressBar.map(work, range(50)) Parameters ---------- function : function Function to call for each step items : sequence Sequence where each element is a tuple of arguments to pass to *function*. multiprocess : bool, int, optional If `True`, use the `multiprocessing` module to distribute each task to a different processor core. If a number greater than 1, then use that number of cores. ipython_widget : bool, optional If `True`, the progress bar will display as an IPython notebook widget. file : writable file-like, optional The file to write the progress bar to. Defaults to `sys.stdout`. If ``file`` is not a tty (as determined by calling its `isatty` member, if any), the scrollbar will be completely silent. step : int, optional Update the progress bar at least every *step* steps (default: 100). If ``multiprocess`` is `True`, this will affect the size of the chunks of ``items`` that are submitted as separate tasks to the process pool. A large step size may make the job complete faster if ``items`` is very long. multiprocessing_start_method : str, optional Useful primarily for testing; if in doubt leave it as the default. When using multiprocessing, certain anomalies occur when starting processes with the "spawn" method (the only option on Windows); other anomalies occur with the "fork" method (the default on Linux). 
""" if multiprocess: function = _mapfunc(function) items = list(enumerate(items)) results = cls.map_unordered( function, items, multiprocess=multiprocess, file=file, step=step, ipython_widget=ipython_widget, multiprocessing_start_method=multiprocessing_start_method) if multiprocess: _, results = zip(*sorted(results)) results = list(results) return results @classmethod def map_unordered(cls, function, items, multiprocess=False, file=None, step=100, ipython_widget=False, multiprocessing_start_method=None): """Map function over items, reporting the progress. Does a `map` operation while displaying a progress bar with percentage complete. The map operation may run on arbitrary order on the items, and the results may be returned in arbitrary order. :: def work(i): print(i) ProgressBar.map(work, range(50)) Parameters ---------- function : function Function to call for each step items : sequence Sequence where each element is a tuple of arguments to pass to *function*. multiprocess : bool, int, optional If `True`, use the `multiprocessing` module to distribute each task to a different processor core. If a number greater than 1, then use that number of cores. ipython_widget : bool, optional If `True`, the progress bar will display as an IPython notebook widget. file : writable file-like, optional The file to write the progress bar to. Defaults to `sys.stdout`. If ``file`` is not a tty (as determined by calling its `isatty` member, if any), the scrollbar will be completely silent. step : int, optional Update the progress bar at least every *step* steps (default: 100). If ``multiprocess`` is `True`, this will affect the size of the chunks of ``items`` that are submitted as separate tasks to the process pool. A large step size may make the job complete faster if ``items`` is very long. multiprocessing_start_method : str, optional Useful primarily for testing; if in doubt leave it as the default. When using multiprocessing, certain anomalies occur when starting processes with the "spawn" method (the only option on Windows); other anomalies occur with the "fork" method (the default on Linux). """ # concurrent.futures import here to avoid import failure when running # in pyodide/Emscripten from concurrent.futures import ProcessPoolExecutor, as_completed results = [] if file is None: file = _get_stdout() with cls(len(items), ipython_widget=ipython_widget, file=file) as bar: if bar._ipython_widget: chunksize = step else: default_step = max(int(float(len(items)) / bar._bar_length), 1) chunksize = min(default_step, step) if not multiprocess or multiprocess < 1: for i, item in enumerate(items): results.append(function(item)) if (i % chunksize) == 0: bar.update(i) else: ctx = multiprocessing.get_context(multiprocessing_start_method) kwargs = dict(mp_context=ctx) with ProcessPoolExecutor( max_workers=(int(multiprocess) if multiprocess is not True else None), **kwargs) as p: for i, f in enumerate( as_completed( p.submit(function, item) for item in items)): bar.update(i) results.append(f.result()) return results class Spinner: """ A class to display a spinner in the terminal. It is designed to be used with the ``with`` statement:: with Spinner("Reticulating splines", "green") as s: for item in enumerate(items): s.update() """ _default_unicode_chars = "◓◑◒◐" _default_ascii_chars = "-/|\\" def __init__(self, msg, color='default', file=None, step=1, chars=None): """ Parameters ---------- msg : str The message to print color : str, optional An ANSI terminal color name. 
            Must be one of: black, red, green, brown, blue, magenta,
            cyan, lightgrey, default, darkgrey, lightred, lightgreen,
            yellow, lightblue, lightmagenta, lightcyan, white.

        file : writable file-like, optional
            The file to write the spinner to.  Defaults to `sys.stdout`.
            If ``file`` is not a tty (as determined by calling its `isatty`
            member, if any, or special case hacks to detect the IPython
            console), the spinner will be completely silent.

        step : int, optional
            Only update the spinner every *step* steps

        chars : str, optional
            The character sequence to use for the spinner
        """
        if file is None:
            file = _get_stdout()

        self._msg = msg
        self._color = color
        self._file = file
        self._step = step
        if chars is None:
            if conf.unicode_output:
                chars = self._default_unicode_chars
            else:
                chars = self._default_ascii_chars
        self._chars = chars

        self._silent = not isatty(file)

        if self._silent:
            self._iter = self._silent_iterator()
        else:
            self._iter = self._iterator()

    def _iterator(self):
        chars = self._chars
        index = 0
        file = self._file
        write = file.write
        flush = file.flush
        try_fallback = True

        while True:
            write('\r')
            color_print(self._msg, self._color, file=file, end='')
            write(' ')

            try:
                if try_fallback:
                    write = _write_with_fallback(chars[index], write, file)
                else:
                    write(chars[index])
            except UnicodeError:
                # If even _write_with_fallback failed for any reason just give
                # up on trying to use the unicode characters
                chars = self._default_ascii_chars
                write(chars[index])
                try_fallback = False  # No good will come of using this again

            flush()
            yield

            for i in range(self._step):
                yield

            index = (index + 1) % len(chars)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        file = self._file
        write = file.write
        flush = file.flush

        if not self._silent:
            write('\r')
            color_print(self._msg, self._color, file=file, end='')
        if exc_type is None:
            color_print(' [Done]', 'green', file=file)
        else:
            color_print(' [Failed]', 'red', file=file)
        flush()

    def __iter__(self):
        return self

    def __next__(self):
        next(self._iter)

    def update(self, value=None):
        """Update the spin wheel in the terminal.

        Parameters
        ----------
        value : int, optional
            Ignored (present just for compatibility with `ProgressBar.update`).

        """
        next(self)

    def _silent_iterator(self):
        color_print(self._msg, self._color, file=self._file, end='')
        self._file.flush()

        while True:
            yield


class ProgressBarOrSpinner:
    """
    A class that displays either a `ProgressBar` or `Spinner`
    depending on whether the total size of the operation is
    known or not.

    It is designed to be used with the ``with`` statement::

        if file.has_length():
            length = file.get_length()
        else:
            length = None
        bytes_read = 0
        with ProgressBarOrSpinner(length) as bar:
            while file.read(blocksize):
                bytes_read += blocksize
                bar.update(bytes_read)
    """

    def __init__(self, total, msg, color='default', file=None):
        """
        Parameters
        ----------
        total : int or None
            If an int, the number of increments in the process being
            tracked and a `ProgressBar` is displayed.  If `None`, a
            `Spinner` is displayed.

        msg : str
            The message to display above the `ProgressBar` or
            alongside the `Spinner`.

        color : str, optional
            The color of ``msg``, if any.  Must be an ANSI terminal
            color name.  Must be one of: black, red, green, brown,
            blue, magenta, cyan, lightgrey, default, darkgrey,
            lightred, lightgreen, yellow, lightblue, lightmagenta,
            lightcyan, white.

        file : writable file-like, optional
            The file to write to.  Defaults to `sys.stdout`.
            If ``file`` is not a tty (as determined by calling its
            `isatty` member, if any), only ``msg`` will be displayed:
            the `ProgressBar` or `Spinner` will be silent.
        """
        if file is None:
            file = _get_stdout()

        if total is None or not isatty(file):
            self._is_spinner = True
            self._obj = Spinner(msg, color=color, file=file)
        else:
            self._is_spinner = False
            color_print(msg, color, file=file)
            self._obj = ProgressBar(total, file=file)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return self._obj.__exit__(exc_type, exc_value, traceback)

    def update(self, value):
        """
        Update the progress bar to the given value (out of the total
        given to the constructor).
        """
        self._obj.update(value)


def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
    """
    Prints a line of source code, highlighting a particular character
    position in the line.  Useful for displaying the context of error
    messages.

    If the line is more than ``width`` characters, the line is truncated
    accordingly and '…' characters are inserted at the front and/or
    end.

    It looks like this::

        there_is_a_syntax_error_here :
                                     ^

    Parameters
    ----------
    line : unicode
        The line of code to display

    col : int, optional
        The character in the line to highlight.  ``col`` must be less
        than ``len(line)``.

    file : writable file-like, optional
        Where to write to.  Defaults to `sys.stdout`.

    tabwidth : int, optional
        The number of spaces per tab (``'\\t'``) character.  Default
        is 8.  All tabs will be converted to spaces to ensure that the
        caret lines up with the correct column.

    width : int, optional
        The width of the display, beyond which the line will be
        truncated.  Defaults to 70 (this matches the default in the
        standard library's `textwrap` module).
    """
    if file is None:
        file = _get_stdout()

    if conf.unicode_output:
        ellipsis = '…'
    else:
        ellipsis = '...'

    write = file.write

    if col is not None:
        if col >= len(line):
            raise ValueError('col must be less than the line length.')
        ntabs = line[:col].count('\t')
        col += ntabs * (tabwidth - 1)

    line = line.rstrip('\n')
    line = line.replace('\t', ' ' * tabwidth)

    if col is not None and col > width:
        new_col = min(width // 2, len(line) - col)
        offset = col - new_col
        line = line[offset + len(ellipsis):]
        width -= len(ellipsis)
        new_col = col
        col -= offset
        color_print(ellipsis, 'darkgrey', file=file, end='')

    if len(line) > width:
        write(line[:width - len(ellipsis)])
        color_print(ellipsis, 'darkgrey', file=file)
    else:
        write(line)
        write('\n')

    if col is not None:
        write(' ' * col)
        color_print('^', 'red', file=file)


# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, linux, MacOSX.  This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#


class Getch:
    """Get a single character from standard input without screen echo.
Returns ------- char : str (one character) """ def __init__(self): try: self.impl = _GetchWindows() except ImportError: try: self.impl = _GetchMacCarbon() except (ImportError, AttributeError): self.impl = _GetchUnix() def __call__(self): return self.impl() class _GetchUnix: def __init__(self): import tty # pylint: disable=W0611 import sys # pylint: disable=W0611 # import termios now or else you'll get the Unix # version on the Mac import termios # pylint: disable=W0611 def __call__(self): import sys import tty import termios fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class _GetchWindows: def __init__(self): import msvcrt # pylint: disable=W0611 def __call__(self): import msvcrt return msvcrt.getch() class _GetchMacCarbon: """ A function which returns the current ASCII key that is down; if no ASCII key is down, the null string is returned. The page http://www.mactech.com/macintosh-c/chap02-1.html was very helpful in figuring out how to do this. """ def __init__(self): import Carbon Carbon.Evt # see if it has this (in Unix, it doesn't) def __call__(self): import Carbon if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask return '' else: # # The event contains the following info: # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1] # # The message (msg) contains the ASCII char which is # extracted with the 0x000000FF charCodeMask; this # number is converted to an ASCII character with chr() and # returned # (what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1] return chr(msg & 0x000000FF)
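

# A minimal self-check sketch of the public helpers above; guarded under
# ``__main__`` so importing this module is unaffected. The values are
# illustrative only.
if __name__ == '__main__':  # pragma: no cover
    print(human_time(4000))         # ' 1h 6m' -- always six characters wide
    print(human_file_size(123456))  # '123k'

    # A progress bar over a trivial loop; it is automatically silent when
    # stdout is not a tty.
    with ProgressBar(100) as bar:
        for _ in range(100):
            bar.update()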
4dd3659a9f66fea3689716b7e67189a60e9e3db06117355455ab324e2916ce01
""" A simple class to manage a piece of global science state. See :ref:`astropy:config-developer` for more details. """ __all__ = ['ScienceState'] class _ScienceStateContext: def __init__(self, parent, value): self._value = value self._parent = parent def __enter__(self): pass def __exit__(self, type, value, tb): self._parent._value = self._value def __repr__(self): # Ensure we have a single-line repr, just in case our # value is not something simple like a string. value_repr, lb, _ = repr(self._parent._value).partition("\n") if lb: value_repr += "..." return f"<ScienceState {self._parent.__name__}: {value_repr}>" class ScienceState: """ Science state subclasses are used to manage global items that can affect science results. Subclasses will generally override `validate` to convert from any of the acceptable inputs (such as strings) to the appropriate internal objects, and set an initial value to the ``_value`` member so it has a default. Examples -------- :: class MyState(ScienceState): @classmethod def validate(cls, value): if value not in ('A', 'B', 'C'): raise ValueError("Must be one of A, B, C") return value """ def __init__(self): raise RuntimeError("This class is a singleton. Do not instantiate.") @classmethod def get(cls): """ Get the current science state value. """ return cls.validate(cls._value) @classmethod def set(cls, value): """Set the current science state value.""" # Create context with current value ctx = _ScienceStateContext(cls, cls._value) # Set new value value = cls.validate(value) cls._value = value # Return context manager return ctx @classmethod def validate(cls, value): """ Validate the value and convert it to its native type, if necessary. """ return value
c50286266d50e15af8660da9a90d0feaee76de238b555c89483df63c6857bd80
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""

import atexit
import contextlib
import errno
import fnmatch
import functools
import hashlib
import os
import io
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.request
import urllib.error
import urllib.parse
import zipfile
import ftplib

from tempfile import NamedTemporaryFile, gettempdir, TemporaryDirectory, mkdtemp
from warnings import warn

try:
    import certifi
except ImportError:
    # certifi support is optional; when available it will be used for TLS/SSL
    # downloads
    certifi = None

import astropy.config.paths
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name

# Order here determines order in the autosummary
__all__ = [
    'Conf', 'conf',
    'download_file', 'download_files_in_parallel',
    'get_readable_fileobj',
    'get_pkg_data_fileobj', 'get_pkg_data_filename',
    'get_pkg_data_contents', 'get_pkg_data_fileobjs',
    'get_pkg_data_filenames', 'get_pkg_data_path',
    'is_url', 'is_url_in_cache', 'get_cached_urls',
    'cache_total_size', 'cache_contents',
    'export_download_cache', 'import_download_cache', 'import_file_to_cache',
    'check_download_cache',
    'clear_download_cache',
    'compute_hash',
    'get_free_space_in_dir', 'check_free_space_in_dir',
    'get_file_contents',
    'CacheMissingWarning',
    'CacheDamaged',
]

_dataurls_to_alias = {}


class _NonClosingBufferedReader(io.BufferedReader):
    def __del__(self):
        try:
            # NOTE: self.raw will not be closed, but left in the state
            # it was in at detachment
            self.detach()
        except Exception:
            pass


class _NonClosingTextIOWrapper(io.TextIOWrapper):
    def __del__(self):
        try:
            # NOTE: self.stream will not be closed, but left in the state
            # it was in at detachment
            self.detach()
        except Exception:
            pass


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.utils.data`.
    """

    dataurl = _config.ConfigItem(
        'http://data.astropy.org/',
        'Primary URL for astropy remote data site.')
    dataurl_mirror = _config.ConfigItem(
        'http://www.astropy.org/astropy-data/',
        'Mirror URL for astropy remote data site.')
    default_http_user_agent = _config.ConfigItem(
        'astropy',
        'Default User-Agent for HTTP request headers. This can be overwritten '
        'for a particular call via http_headers option, where available. '
        'This only provides the default value when not set by http_headers.')
    remote_timeout = _config.ConfigItem(
        10.,
        'Time to wait for remote data queries (in seconds).',
        aliases=['astropy.coordinates.name_resolve.name_resolve_timeout'])
    allow_internet = _config.ConfigItem(
        True,
        'If False, prevents any attempt to download from Internet.')
    compute_hash_block_size = _config.ConfigItem(
        2 ** 16,  # 64K
        'Block size for computing file hashes.')
    download_block_size = _config.ConfigItem(
        2 ** 16,  # 64K
        'Number of bytes of remote data to download per step.')
    delete_temporary_downloads_at_exit = _config.ConfigItem(
        True,
        'If True, temporary download files created when the cache is '
        'inaccessible will be deleted at the end of the python session.')


conf = Conf()


class CacheMissingWarning(AstropyWarning):
    """
    This warning indicates the standard cache directory is not accessible,
    with the first argument providing the warning message.
    If args[1] is present, it is a filename indicating the path to a
    temporary file that was created to store a remote data download in the
    absence of the cache.
    """


def is_url(string):
    """
    Test whether a string is a valid URL for :func:`download_file`.

    Parameters
    ----------
    string : str
        The string to test.

    Returns
    -------
    status : bool
        Whether the string is a URL.
    """
    url = urllib.parse.urlparse(string)
    # we can't just check that url.scheme is not an empty string, because
    # file paths in windows would return a non-empty scheme (e.g. e:\\
    # returns 'e').
    return url.scheme.lower() in ['http', 'https', 'ftp', 'sftp', 'ssh', 'file']


# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url


def _is_inside(path, parent_path):
    # We have to try realpath too to avoid issues with symlinks, but we leave
    # abspath because some systems like debian have the absolute path (with no
    # symlinks followed) match, but the real directories in different
    # locations, so need to try both cases.
    return os.path.abspath(path).startswith(os.path.abspath(parent_path)) \
        or os.path.realpath(path).startswith(os.path.realpath(parent_path))


@contextlib.contextmanager
def get_readable_fileobj(name_or_obj, encoding=None, cache=False,
                         show_progress=True, remote_timeout=None,
                         sources=None, http_headers=None):
    """Yield a readable, seekable file-like object from a file or URL.

    This supports passing filenames, URLs, and readable file-like objects,
    any of which can be compressed in gzip, bzip2 or lzma (xz) if the
    appropriate compression libraries are provided by the Python installation.

    Notes
    -----
    This function is a context manager, and should be used for example as::

        with get_readable_fileobj('file.dat') as f:
            contents = f.read()

    If a URL is provided and the cache is in use, the provided URL will be
    the name used in the cache. The contents may already be stored in the
    cache under this URL provided, they may be downloaded from this URL,
    or they may be downloaded from one of the locations listed in
    ``sources``. See `~download_file` for details.

    Parameters
    ----------
    name_or_obj : str or file-like
        The filename of the file to access (if given as a string), or
        the file-like object to access.

        If a file-like object, it must be opened in binary mode.

    encoding : str, optional
        When `None` (default), returns a file-like object with a
        ``read`` method that returns `str` (``unicode``) objects, using
        `locale.getpreferredencoding` as an encoding.  This matches
        the default behavior of the built-in `open` when no ``mode``
        argument is provided.

        When ``'binary'``, returns a file-like object where its ``read``
        method returns `bytes` objects.

        When another string, it is the name of an encoding, and the
        file-like object's ``read`` method will return `str` (``unicode``)
        objects, decoded from binary using the given encoding.

    cache : bool or "update", optional
        Whether to cache the contents of remote URLs. If "update",
        check the remote URL for a new version but store the result
        in the cache.

    show_progress : bool, optional
        Whether to display a progress bar if the file is downloaded
        from a remote server.  Default is `True`.

    remote_timeout : float
        Timeout for remote requests in seconds (default is the
        configurable `astropy.utils.data.Conf.remote_timeout`).

    sources : list of str, optional
        If provided, a list of URLs to try to obtain the file from. The
        result will be stored under the original URL.
        The original URL will *not* be tried unless it is in this list;
        this is to prevent long waits for a primary server that is
        known to be inaccessible at the moment.

    http_headers : dict or None
        HTTP request headers to pass into ``urlopen`` if needed. (These headers
        are ignored if the protocol for the ``name_or_obj``/``sources`` entry
        is not a remote HTTP URL.) In the default case (None), the headers are
        ``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
        is set by ``astropy.utils.data.conf.default_http_user_agent``.

    Returns
    -------
    file : readable file-like
    """

    # close_fds is a list of file handles created by this function
    # that need to be closed.  We don't want to always just close the
    # returned file handle, because it may simply be the file handle
    # passed in.  In that case it is not the responsibility of this
    # function to close it: doing so could result in a "double close"
    # and an "invalid file descriptor" exception.

    close_fds = []
    delete_fds = []

    if remote_timeout is None:
        # use configfile default
        remote_timeout = conf.remote_timeout

    # name_or_obj could be an os.PathLike object
    if isinstance(name_or_obj, os.PathLike):
        name_or_obj = os.fspath(name_or_obj)

    # Get a file object to the content
    if isinstance(name_or_obj, str):
        is_url = _is_url(name_or_obj)
        if is_url:
            name_or_obj = download_file(
                name_or_obj, cache=cache, show_progress=show_progress,
                timeout=remote_timeout, sources=sources,
                http_headers=http_headers)
        fileobj = io.FileIO(name_or_obj, 'r')
        if is_url and not cache:
            delete_fds.append(fileobj)
        close_fds.append(fileobj)
    else:
        fileobj = name_or_obj

    # Check if the file object supports random access, and if not,
    # then wrap it in a BytesIO buffer.  It would be nicer to use a
    # BufferedReader to avoid loading the whole file first, but that
    # is not compatible with streams or urllib2.urlopen objects on
    # Python 2.x.
    if not hasattr(fileobj, 'seek'):
        try:
            # py.path.LocalPath objects have .read() method but it uses
            # text mode, which won't work. .read_binary() does, and
            # surely other ducks would return binary contents when
            # called like this.
            # py.path.LocalPath is what comes from the tmpdir fixture
            # in pytest.
fileobj = io.BytesIO(fileobj.read_binary()) except AttributeError: fileobj = io.BytesIO(fileobj.read()) # Now read enough bytes to look at signature signature = fileobj.read(4) fileobj.seek(0) if signature[:3] == b'\x1f\x8b\x08': # gzip import struct try: import gzip fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb') fileobj_new.read(1) # need to check that the file is really gzip except (OSError, EOFError, struct.error): # invalid gzip file fileobj.seek(0) fileobj_new.close() else: fileobj_new.seek(0) fileobj = fileobj_new elif signature[:3] == b'BZh': # bzip2 try: import bz2 except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the bz2 module.") try: # bz2.BZ2File does not support file objects, only filenames, so we # need to write the data to a temporary file with NamedTemporaryFile("wb", delete=False) as tmp: tmp.write(fileobj.read()) tmp.close() fileobj_new = bz2.BZ2File(tmp.name, mode='rb') fileobj_new.read(1) # need to check that the file is really bzip2 except OSError: # invalid bzip2 file fileobj.seek(0) fileobj_new.close() # raise else: fileobj_new.seek(0) close_fds.append(fileobj_new) fileobj = fileobj_new elif signature[:3] == b'\xfd7z': # xz try: import lzma fileobj_new = lzma.LZMAFile(fileobj, mode='rb') fileobj_new.read(1) # need to check that the file is really xz except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the lzma module.") except (OSError, EOFError): # invalid xz file fileobj.seek(0) fileobj_new.close() # should we propagate this to the caller to signal bad content? # raise ValueError(e) else: fileobj_new.seek(0) fileobj = fileobj_new # By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File # or lzma.LZMAFile instance opened in binary mode (that is, read # returns bytes). Now we need to, if requested, wrap it in a # io.TextIOWrapper so read will return unicode based on the # encoding parameter. needs_textio_wrapper = encoding != 'binary' if needs_textio_wrapper: # A bz2.BZ2File can not be wrapped by a TextIOWrapper, # so we decompress it to a temporary file and then # return a handle to that. try: import bz2 except ImportError: pass else: if isinstance(fileobj, bz2.BZ2File): tmp = NamedTemporaryFile("wb", delete=False) data = fileobj.read() tmp.write(data) tmp.close() delete_fds.append(tmp) fileobj = io.FileIO(tmp.name, 'r') close_fds.append(fileobj) fileobj = _NonClosingBufferedReader(fileobj) fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding) # Ensure that file is at the start - io.FileIO will for # example not always be at the start: # >>> import io # >>> f = open('test.fits', 'rb') # >>> f.read(4) # 'SIMP' # >>> f.seek(0) # >>> fileobj = io.FileIO(f.fileno()) # >>> fileobj.tell() # 4096L fileobj.seek(0) try: yield fileobj finally: for fd in close_fds: fd.close() for fd in delete_fds: os.remove(fd.name) def get_file_contents(*args, **kwargs): """ Retrieves the contents of a filename or file-like object. See the `get_readable_fileobj` docstring for details on parameters. Returns ------- object The content of the file (as requested by ``encoding``). """ with get_readable_fileobj(*args, **kwargs) as f: return f.read() @contextlib.contextmanager def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations for the package and provides the file as a file-like object that reads bytes. 
Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- fileobj : file-like An object with the contents of the data file available via ``read`` function. Can be used as part of a ``with`` statement, automatically closing itself after the ``with`` block. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Examples -------- This will retrieve a data file and its contents for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('data/3d_cd.hdr', ... package='astropy.wcs.tests') as fobj: ... fcontents = fobj.read() ... This next example would download a data file from the astropy data server because the ``allsky/allsky_rosat.fits`` file is not present in the source distribution. It will also save the file locally so the next time it is accessed it won't need to be downloaded.:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] This does the same thing but does *not* cache it locally:: >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... 
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_filename : returns a local name for a file containing the data """ # noqa datafn = get_pkg_data_path(data_name, package=package) if os.path.isdir(datafn): raise OSError("Tried to access a data file that's actually " "a package data directory") elif os.path.isfile(datafn): # local file with get_readable_fileobj(datafn, encoding=encoding) as fileobj: yield fileobj else: # remote file with get_readable_fileobj( conf.dataurl + data_name, encoding=encoding, cache=cache, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name], ) as fileobj: # We read a byte to trigger any URLErrors fileobj.read(1) fileobj.seek(0) yield fileobj def get_pkg_data_filename(data_name, package=None, show_progress=True, remote_timeout=None): """ Retrieves a data file from the standard locations for the package and provides a local filename for the data. This function is similar to `get_pkg_data_fileobj` but returns the file *name* instead of a readable file-like object. This means that this function must always cache remote files locally, unlike `get_pkg_data_fileobj`. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. show_progress : bool, optional Whether to display a progress bar if the file is downloaded from a remote server. Default is `True`. remote_timeout : float Timeout for the requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Returns ------- filename : str A file path on the local file system corresponding to the data requested in ``data_name``. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('data/3d_cd.hdr', ... package='astropy.wcs.tests') >>> with open(fn) as f: ... fcontents = f.read() ... This retrieves a data file by hash either locally or from the astropy data server:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP >>> with open(fn) as f: ... fcontents = f.read() ... 
See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_fileobj : returns a file-like object with the data """ if remote_timeout is None: # use configfile default remote_timeout = conf.remote_timeout if data_name.startswith('hash/'): # first try looking for a local version if a hash is specified hashfn = _find_hash_fn(data_name[5:]) if hashfn is None: return download_file(conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name]) else: return hashfn else: fs_path = os.path.normpath(data_name) datafn = get_pkg_data_path(fs_path, package=package) if os.path.isdir(datafn): raise OSError("Tried to access a data file that's actually " "a package data directory") elif os.path.isfile(datafn): # local file return datafn else: # remote file return download_file(conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name]) def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations and returns its contents as a bytes object. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. * A URL to some other file. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- contents : bytes The complete contents of the file as a bytes object. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. 
See Also -------- get_pkg_data_fileobj : returns a file-like object with the data get_pkg_data_filename : returns a local name for a file containing the data """ with get_pkg_data_fileobj(data_name, package=package, encoding=encoding, cache=cache) as fd: contents = fd.read() return contents def get_pkg_data_filenames(datadir, package=None, pattern='*'): """ Returns the path of all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data``. * Remote URLs are not currently supported. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. Returns ------- filenames : iterator of str Paths on the local filesystem in *datadir* matching *pattern*. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filenames >>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests', ... '*.hdr'): ... with open(fn) as f: ... fcontents = f.read() ... """ path = get_pkg_data_path(datadir, package=package) if os.path.isfile(path): raise OSError( "Tried to access a data directory that's actually " "a package data file") elif os.path.isdir(path): for filename in os.listdir(path): if fnmatch.fnmatch(filename, pattern): yield os.path.join(path, filename) else: raise OSError("Path not found") def get_pkg_data_fileobjs(datadir, package=None, pattern='*', encoding=None): """ Returns readable file objects for all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data`` * Remote URLs are not currently supported package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. Returns ------- fileobjs : iterator of file object File objects for each of the files on the local filesystem in *datadir* matching *pattern*. 
    Examples
    --------
    This will retrieve the contents of the data file for the `astropy.wcs`
    tests::

        >>> from astropy.utils.data import get_pkg_data_fileobjs
        >>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
        ...                                 '*.hdr'):
        ...     fcontents = fd.read()
        ...
    """
    for fn in get_pkg_data_filenames(datadir, package=package,
                                     pattern=pattern):
        with get_readable_fileobj(fn, encoding=encoding) as fd:
            yield fd


def compute_hash(localfn):
    """
    Computes the MD5 hash for a file.

    The hash for a data file is used for looking up data files in a unique
    fashion. This is of particular use for tests; a test may require a
    particular version of a particular file, in which case it can be accessed
    via hash to get the appropriate version.

    Typically, if you wish to write a test that requires a particular data
    file, you will want to submit that file to the astropy data servers, and
    use e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
    but with the hash for your file in place of the hash in the example.

    Parameters
    ----------
    localfn : str
        The path to the file for which the hash should be generated.

    Returns
    -------
    hash : str
        The hex digest of the cryptographic hash for the contents of the
        ``localfn`` file.
    """
    with open(localfn, 'rb') as f:
        h = hashlib.md5()
        block = f.read(conf.compute_hash_block_size)
        while block:
            h.update(block)
            block = f.read(conf.compute_hash_block_size)

    return h.hexdigest()


def get_pkg_data_path(*path, package=None):
    """Get path from source-included data directories.

    Parameters
    ----------
    *path : str
        Name/location of the desired data file/directory.
        May be a tuple of strings for ``os.path`` joining.

    package : str or None, optional, keyword-only
        If specified, look for a file relative to the given package, rather
        than the calling module's package.

    Returns
    -------
    path : str
        Name/location of the desired data file/directory.

    Raises
    ------
    ImportError
        Given package or module is not importable.
    RuntimeError
        If the local data file is outside of the package's tree.

    """
    if package is None:
        module = find_current_module(1, finddiff=['astropy.utils.data',
                                                  'contextlib'])
        if module is None:
            # not called from inside an astropy package.  So just pass name
            # through
            return os.path.join(*path)

        if not hasattr(module, '__package__') or not module.__package__:
            # The __package__ attribute may be missing or set to None; see
            # PEP-366, also astropy issue #1256
            if '.' in module.__name__:
                package = module.__name__.rpartition('.')[0]
            else:
                package = module.__name__
        else:
            package = module.__package__
    else:
        # package errors if it isn't a str
        # so there is no need for checks in the containing if/else
        module = resolve_name(package)

    # module path within package
    module_path = os.path.dirname(module.__file__)
    full_path = os.path.join(module_path, *path)

    # Check that file is inside tree.
    rootpkgname = package.partition('.')[0]
    rootpkg = resolve_name(rootpkgname)
    root_dir = os.path.dirname(rootpkg.__file__)
    if not _is_inside(full_path, root_dir):
        raise RuntimeError(f"attempted to get a local data file outside "
                           f"of the {rootpkgname} tree.")

    return full_path


def _find_hash_fn(hexdigest, pkgname='astropy'):
    """
    Looks for a local file by hash - returns file name if found and a valid
    file, otherwise returns None.
    """
    for v in cache_contents(pkgname=pkgname).values():
        if compute_hash(v) == hexdigest:
            return v
    return None


def get_free_space_in_dir(path, unit=False):
    """
    Given a path to a directory, returns the amount of free space
    on that filesystem.
Parameters ---------- path : str The path to a directory. unit : bool or `~astropy.units.Unit` Return the amount of free space as Quantity in the given unit, if provided. Default is `False` for backward-compatibility. Returns ------- free_space : int or `~astropy.units.Quantity` The amount of free space on the partition that the directory is on. If ``unit=False``, it is returned as plain integer (in bytes). """ if not os.path.isdir(path): raise OSError( "Can only determine free space associated with directories, " "not files.") # Actually you can on Linux but I want to avoid code that fails # on Windows only. free_space = shutil.disk_usage(path).free if unit: from astropy import units as u # TODO: Automatically determine best prefix to use. if unit is True: unit = u.byte free_space = u.Quantity(free_space, u.byte).to(unit) return free_space def check_free_space_in_dir(path, size): """ Determines if a given directory has enough space to hold a file of a given size. Parameters ---------- path : str The path to a directory. size : int or `~astropy.units.Quantity` A proposed filesize. If not a Quantity, assume it is in bytes. Raises ------ OSError There is not enough room on the filesystem. """ space = get_free_space_in_dir(path, unit=getattr(size, 'unit', False)) if space < size: from astropy.utils.console import human_file_size raise OSError(f"Not enough free space in {path} " f"to download a {human_file_size(size)} file, " f"only {human_file_size(space)} left") class _ftptlswrapper(urllib.request.ftpwrapper): def init(self): self.busy = 0 self.ftp = ftplib.FTP_TLS() self.ftp.connect(self.host, self.port, self.timeout) self.ftp.login(self.user, self.passwd) self.ftp.prot_p() _target = '/'.join(self.dirs) self.ftp.cwd(_target) class _FTPTLSHandler(urllib.request.FTPHandler): def connect_ftp(self, user, passwd, host, port, dirs, timeout): return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False) @functools.lru_cache() def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False): """ Helper for building a `urllib.request.build_opener` which handles TLS/SSL. 
""" # Import ssl here to avoid import failure when running in pyodide/Emscripten import ssl ssl_context = dict(it for it in ssl_context) if ssl_context else {} cert_chain = {} if 'certfile' in ssl_context: cert_chain.update({ 'certfile': ssl_context.pop('certfile'), 'keyfile': ssl_context.pop('keyfile', None), 'password': ssl_context.pop('password', None) }) elif 'password' in ssl_context or 'keyfile' in ssl_context: raise ValueError( "passing 'keyfile' or 'password' in the ssl_context argument " "requires passing 'certfile' as well") if 'cafile' not in ssl_context and certifi is not None: ssl_context['cafile'] = certifi.where() ssl_context = ssl.create_default_context(**ssl_context) if allow_insecure: ssl_context.check_hostname = False ssl_context.verify_mode = ssl.CERT_NONE if cert_chain: ssl_context.load_cert_chain(**cert_chain) https_handler = urllib.request.HTTPSHandler(context=ssl_context) if ftp_tls: urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler) else: urlopener = urllib.request.build_opener(https_handler) return urlopener def _try_url_open(source_url, timeout=None, http_headers=None, ftp_tls=False, ssl_context=None, allow_insecure=False): """Helper for opening a URL while handling TLS/SSL verification issues.""" # Import ssl here to avoid import failure when running in pyodide/Emscripten import ssl # Always try first with a secure connection # _build_urlopener uses lru_cache, so the ssl_context argument must be # converted to a hashshable type (a set of 2-tuples) ssl_context = frozenset(ssl_context.items() if ssl_context else []) urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False) req = urllib.request.Request(source_url, headers=http_headers) try: return urlopener.open(req, timeout=timeout) except urllib.error.URLError as exc: reason = exc.reason if (isinstance(reason, ssl.SSLError) and reason.reason == 'CERTIFICATE_VERIFY_FAILED'): msg = (f'Verification of TLS/SSL certificate at {source_url} ' f'failed: this can mean either the server is ' f'misconfigured or your local root CA certificates are ' f'out-of-date; in the latter case this can usually be ' f'addressed by installing the Python package "certifi" ' f'(see the documentation for astropy.utils.data.download_url)') if not allow_insecure: msg += (f' or in both cases you can work around this by ' f'passing allow_insecure=True, but only if you ' f'understand the implications; the original error ' f'was: {reason}') raise urllib.error.URLError(msg) else: msg += '. Re-trying with allow_insecure=True.' 
warn(msg, AstropyWarning) # Try again with a new urlopener allowing insecure connections urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True) return urlopener.open(req, timeout=timeout) raise def _download_file_from_source(source_url, show_progress=True, timeout=None, remote_url=None, cache=False, pkgname='astropy', http_headers=None, ftp_tls=None, ssl_context=None, allow_insecure=False): from astropy.utils.console import ProgressBarOrSpinner if not conf.allow_internet: raise urllib.error.URLError( f"URL {remote_url} was supposed to be downloaded but " f"allow_internet is {conf.allow_internet}; " f"if this is unexpected check the astropy.cfg file for the option " f"allow_internet") if remote_url is None: remote_url = source_url if http_headers is None: http_headers = {} if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp": try: return _download_file_from_source(source_url, show_progress=show_progress, timeout=timeout, remote_url=remote_url, cache=cache, pkgname=pkgname, http_headers=http_headers, ftp_tls=False) except urllib.error.URLError as e: # e.reason might not be a string, e.g. socket.gaierror if str(e.reason).startswith("ftp error: error_perm"): ftp_tls = True else: raise with _try_url_open(source_url, timeout=timeout, http_headers=http_headers, ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=allow_insecure) as remote: info = remote.info() try: size = int(info['Content-Length']) except (KeyError, ValueError, TypeError): size = None if size is not None: check_free_space_in_dir(gettempdir(), size) if cache: dldir = _get_download_cache_loc(pkgname) check_free_space_in_dir(dldir, size) if show_progress and sys.stdout.isatty(): progress_stream = sys.stdout else: progress_stream = io.StringIO() if source_url == remote_url: dlmsg = f"Downloading {remote_url}" else: dlmsg = f"Downloading {remote_url} from {source_url}" with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p: with NamedTemporaryFile(prefix=f"astropy-download-{os.getpid()}-", delete=False) as f: try: bytes_read = 0 block = remote.read(conf.download_block_size) while block: f.write(block) bytes_read += len(block) p.update(bytes_read) block = remote.read(conf.download_block_size) if size is not None and bytes_read > size: raise urllib.error.URLError( f"File was supposed to be {size} bytes but " f"server provides more, at least {bytes_read} " f"bytes. Download failed.") if size is not None and bytes_read < size: raise urllib.error.ContentTooShortError( f"File was supposed to be {size} bytes but we " f"only got {bytes_read} bytes. Download failed.", content=None) except BaseException: if os.path.exists(f.name): try: os.remove(f.name) except OSError: pass raise return f.name def download_file(remote_url, cache=False, show_progress=True, timeout=None, sources=None, pkgname='astropy', http_headers=None, ssl_context=None, allow_insecure=False): """Downloads a URL and optionally caches the result. It returns the filename of a file containing the URL's contents. If ``cache=True`` and the file is present in the cache, just returns the filename; if the file had to be downloaded, add it to the cache. If ``cache="update"`` always download and add it to the cache. The cache is effectively a dictionary mapping URLs to files; by default the file contains the contents of the URL that is its key, but in practice these can be obtained from a mirror (using ``sources``) or imported from the local filesystem (using `~import_file_to_cache` or `~import_download_cache`). 

    Regardless, each file is regarded as representing the contents of a
    particular URL, and this URL should be used to look them up or otherwise
    manipulate them.

    The files in the cache directory are named according to a cryptographic
    hash of their URLs (currently MD5, so hackers can cause collisions).
    The modification times on these files normally indicate when they were
    last downloaded from the Internet.

    Parameters
    ----------
    remote_url : str
        The URL of the file to download.

    cache : bool or "update", optional
        Whether to cache the contents of remote URLs. If "update",
        always download the remote URL in case there is a new version
        and store the result in the cache.

    show_progress : bool, optional
        Whether to display a progress bar during the download (default
        is `True`). Regardless of this setting, the progress bar is only
        displayed when outputting to a terminal.

    timeout : float, optional
        Timeout for remote requests in seconds (default is the configurable
        `astropy.utils.data.Conf.remote_timeout`).

    sources : list of str, optional
        If provided, a list of URLs to try to obtain the file from. The
        result will be stored under the original URL. The original URL
        will *not* be tried unless it is in this list; this is to prevent
        long waits for a primary server that is known to be inaccessible
        at the moment. If an empty list is passed, then ``download_file``
        will not attempt to connect to the Internet, that is, if the file
        is not in the cache a KeyError will be raised.

    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    http_headers : dict or None
        HTTP request headers to pass into ``urlopen`` if needed. (These headers
        are ignored if the protocol for the ``name_or_obj``/``sources`` entry
        is not a remote HTTP URL.) In the default case (None), the headers are
        ``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
        is set by ``astropy.utils.data.conf.default_http_user_agent``.

    ssl_context : dict, optional
        Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources.  This can be used to
        provide alternative paths to root CA certificates.  Additionally, if
        the key ``'certfile'`` and optionally ``'keyfile'`` and ``'password'``
        are included, they are passed to `ssl.SSLContext.load_cert_chain`.
        This can be used for performing SSL/TLS client certificate
        authentication for servers that require it.

    allow_insecure : bool, optional
        Allow downloading files over a TLS/SSL connection even when the
        server certificate verification failed.  When set to `True` the
        potentially insecure download is allowed to proceed, but an
        `~astropy.utils.exceptions.AstropyWarning` is issued.  If you are
        frequently getting certificate verification warnings, consider
        installing or upgrading the `certifi`_ package, which provides
        frequently updated certificates for common root CAs (i.e., a set
        similar to those used by web browsers).  If installed, Astropy will
        use it automatically.

        .. _certifi: https://pypi.org/project/certifi/

    Returns
    -------
    local_path : str
        Returns the local path that the file was downloaded to.

    Raises
    ------
    urllib.error.URLError
        Whenever there's a problem getting the remote file.
    KeyError
        When a file was requested from the cache but is missing and no
        sources were provided to obtain it from the Internet.

Notes ----- Because this function returns a filename, another process could run `clear_download_cache` before you actually open the file, leaving you with a filename that no longer points to a usable file. """ if timeout is None: timeout = conf.remote_timeout if sources is None: sources = [remote_url] if http_headers is None: http_headers = {'User-Agent': conf.default_http_user_agent, 'Accept': '*/*'} missing_cache = "" url_key = remote_url if cache: try: dldir = _get_download_cache_loc(pkgname) except OSError as e: cache = False missing_cache = ( f"Cache directory cannot be read or created ({e}), " f"providing data in temporary file instead." ) else: if cache == "update": pass elif isinstance(cache, str): raise ValueError(f"Cache value '{cache}' was requested but " f"'update' is the only recognized string; " f"otherwise use a boolean") else: filename = os.path.join(dldir, _url_to_dirname(url_key), "contents") if os.path.exists(filename): return os.path.abspath(filename) errors = {} for source_url in sources: try: f_name = _download_file_from_source( source_url, timeout=timeout, show_progress=show_progress, cache=cache, remote_url=remote_url, pkgname=pkgname, http_headers=http_headers, ssl_context=ssl_context, allow_insecure=allow_insecure) # Success! break except urllib.error.URLError as e: # errno 8 is from SSL "EOF occurred in violation of protocol" if (hasattr(e, 'reason') and hasattr(e.reason, 'errno') and e.reason.errno == 8): e.reason.strerror = (e.reason.strerror + '. requested URL: ' + remote_url) e.reason.args = (e.reason.errno, e.reason.strerror) errors[source_url] = e else: # No success if not sources: raise KeyError( f"No sources listed and file {remote_url} not in cache! " f"Please include primary URL in sources if you want it to be " f"included as a valid source.") elif len(sources) == 1: raise errors[sources[0]] else: raise urllib.error.URLError( f"Unable to open any source! Exceptions were {errors}") \ from errors[sources[0]] if cache: try: return import_file_to_cache(url_key, f_name, remove_original=True, replace=(cache == 'update'), pkgname=pkgname) except PermissionError as e: # Cache is readonly, we can't update it missing_cache = ( f"Cache directory appears to be read-only ({e}), unable to import " f"downloaded file, providing data in temporary file {f_name} " f"instead.") # FIXME: other kinds of cache problem can occur? if missing_cache: warn(CacheMissingWarning(missing_cache, f_name)) if conf.delete_temporary_downloads_at_exit: global _tempfilestodel _tempfilestodel.append(f_name) return os.path.abspath(f_name) def is_url_in_cache(url_key, pkgname='astropy'): """Check if a download for ``url_key`` is in the cache. The provided ``url_key`` will be the name used in the cache. The contents may have been downloaded from this URL or from a mirror or they may have been provided by the user. See `~download_file` for details. Parameters ---------- url_key : str The URL retrieved pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- in_cache : bool `True` if a download for ``url_key`` is in the cache, `False` if not or if the cache does not exist at all. 

    See Also
    --------
    cache_contents : obtain a dictionary listing everything in the cache
    """
    try:
        dldir = _get_download_cache_loc(pkgname)
    except OSError:
        return False
    filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
    return os.path.exists(filename)


def cache_total_size(pkgname='astropy'):
    """Return the total size in bytes of all files in the cache."""
    size = 0
    dldir = _get_download_cache_loc(pkgname=pkgname)
    for root, dirs, files in os.walk(dldir):
        size += sum(os.path.getsize(os.path.join(root, name))
                    for name in files)
    return size


def _do_download_files_in_parallel(kwargs):
    with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
        with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
            return download_file(**kwargs)


def download_files_in_parallel(urls,
                               cache="update",
                               show_progress=True,
                               timeout=None,
                               sources=None,
                               multiprocessing_start_method=None,
                               pkgname='astropy'):
    """Download multiple files in parallel from the given URLs.

    Blocks until all files have downloaded.  The result is a list of
    local file paths corresponding to the given urls.

    The results will be stored in the cache under the values in ``urls`` even
    if they are obtained from some other location via ``sources``.  See
    `~download_file` for details.

    Parameters
    ----------
    urls : list of str
        The URLs to retrieve.

    cache : bool or "update", optional
        Whether to use the cache (default is ``"update"``). If "update",
        always download the remote URLs to see if new data is available
        and store the result in cache.

        .. versionchanged:: 4.0
            The default was changed to ``"update"`` and setting it to
            ``False`` will print a Warning and set it to ``"update"`` again,
            because the function will not work properly without cache. Using
            ``True`` will work as expected.

        .. versionchanged:: 3.0
            The default was changed to ``True`` and setting it to ``False``
            will print a Warning and set it to ``True`` again, because the
            function will not work properly without cache.

    show_progress : bool, optional
        Whether to display a progress bar during the download (default
        is `True`).

    timeout : float, optional
        Timeout for each individual request in seconds (default is the
        configurable `astropy.utils.data.Conf.remote_timeout`).

    sources : dict, optional
        If provided, for each URL a list of URLs to try to obtain the
        file from. The result will be stored under the original URL.
        For any URL in this dictionary, the original URL will *not* be
        tried unless it is in this list; this is to prevent long waits
        for a primary server that is known to be inaccessible at the
        moment.

    multiprocessing_start_method : str, optional
        Useful primarily for testing; if in doubt leave it as the default.
        When using multiprocessing, certain anomalies occur when starting
        processes with the "spawn" method (the only option on Windows);
        other anomalies occur with the "fork" method (the default on Linux).

    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Returns
    -------
    paths : list of str
        The local file paths corresponding to the downloaded URLs.

    Notes
    -----
    If a URL is unreachable, the downloading will grind to a halt and the
    exception will propagate upward, but an unpredictable number of
    files will have been successfully downloaded and will remain in
    the cache.
""" from .console import ProgressBar if timeout is None: timeout = conf.remote_timeout if sources is None: sources = {} if not cache: # See issue #6662, on windows won't work because the files are removed # again before they can be used. On *NIX systems it will behave as if # cache was set to True because multiprocessing cannot insert the items # in the list of to-be-removed files. This could be fixed, but really, # just use the cache, with update_cache if appropriate. warn('Disabling the cache does not work because of multiprocessing, ' 'it will be set to ``"update"``. You may need to manually remove ' 'the cached files with clear_download_cache() afterwards.', AstropyWarning) cache = "update" if show_progress: progress = sys.stdout else: progress = io.BytesIO() # Combine duplicate URLs combined_urls = list(set(urls)) combined_paths = ProgressBar.map( _do_download_files_in_parallel, [dict(remote_url=u, cache=cache, show_progress=False, timeout=timeout, sources=sources.get(u, None), pkgname=pkgname, temp_cache=astropy.config.paths.set_temp_cache._temp_path, temp_config=astropy.config.paths.set_temp_config._temp_path) for u in combined_urls], file=progress, multiprocess=True, multiprocessing_start_method=multiprocessing_start_method, ) paths = [] for url in urls: paths.append(combined_paths[combined_urls.index(url)]) return paths # This is used by download_file and _deltemps to determine the files to delete # when the interpreter exits _tempfilestodel = [] @atexit.register def _deltemps(): global _tempfilestodel if _tempfilestodel is not None: while len(_tempfilestodel) > 0: fn = _tempfilestodel.pop() if os.path.isfile(fn): try: os.remove(fn) except OSError: # oh well we tried # could be held open by some process, on Windows pass elif os.path.isdir(fn): try: shutil.rmtree(fn) except OSError: # couldn't get rid of it, sorry # could be held open by some process, on Windows pass def clear_download_cache(hashorurl=None, pkgname='astropy'): """Clears the data file cache by deleting the local file(s). If a URL is provided, it will be the name used in the cache. The contents may have been downloaded from this URL or from a mirror or they may have been provided by the user. See `~download_file` for details. For the purposes of this function, a file can also be identified by a hash of its contents or by the filename under which the data is stored (as returned by `~download_file`, for example). Parameters ---------- hashorurl : str or None If None, the whole cache is cleared. Otherwise, specify a hash for the cached file that is supposed to be deleted, the full path to a file in the cache that should be deleted, or a URL that should be removed from the cache if present. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. 
""" try: dldir = _get_download_cache_loc(pkgname) except OSError as e: # Problem arose when trying to open the cache # Just a warning, though msg = 'Not clearing data cache - cache inaccessible due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return try: if hashorurl is None: # Optional: delete old incompatible caches too _rmtree(dldir) elif _is_url(hashorurl): filepath = os.path.join(dldir, _url_to_dirname(hashorurl)) _rmtree(filepath) else: # Not a URL, it should be either a filename or a hash filepath = os.path.join(dldir, hashorurl) rp = os.path.relpath(filepath, dldir) if rp.startswith(".."): raise RuntimeError( f"attempted to use clear_download_cache on the path " f"{filepath} outside the data cache directory {dldir}") d, f = os.path.split(rp) if d and f in ["contents", "url"]: # It's a filename not the hash of a URL # so we want to zap the directory containing the # files "url" and "contents" filepath = os.path.join(dldir, d) if os.path.exists(filepath): _rmtree(filepath) elif (len(hashorurl) == 2*hashlib.md5().digest_size and re.match(r"[0-9a-f]+", hashorurl)): # It's the hash of some file contents, we have to find the right file filename = _find_hash_fn(hashorurl) if filename is not None: clear_download_cache(filename) except OSError as e: msg = 'Not clearing data from cache - problem arose ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) def _get_download_cache_loc(pkgname='astropy'): """Finds the path to the cache directory and makes them if they don't exist. Parameters ---------- pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- datadir : str The path to the data cache directory. """ try: datadir = os.path.join(astropy.config.paths.get_cache_dir(pkgname), 'download', 'url') if not os.path.exists(datadir): try: os.makedirs(datadir) except OSError: if not os.path.exists(datadir): raise elif not os.path.isdir(datadir): raise OSError(f'Data cache directory {datadir} is not a directory') return datadir except OSError as e: msg = 'Remote data cache could not be accessed due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) raise def _url_to_dirname(url): if not _is_url(url): raise ValueError(f"Malformed URL: '{url}'") # Make domain names case-insensitive # Also makes the http:// case-insensitive urlobj = list(urllib.parse.urlsplit(url)) urlobj[1] = urlobj[1].lower() if urlobj[0].lower() in ['http', 'https'] and urlobj[1] and urlobj[2] == '': urlobj[2] = '/' url_c = urllib.parse.urlunsplit(urlobj) return hashlib.md5(url_c.encode("utf-8")).hexdigest() class ReadOnlyDict(dict): def __setitem__(self, key, value): raise TypeError("This object is read-only.") _NOTHING = ReadOnlyDict({}) class CacheDamaged(ValueError): """Record the URL or file that was a problem. Using clear_download_cache on the .bad_file or .bad_url attribute, whichever is not None, should resolve this particular problem. """ def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs): super().__init__(*args, **kwargs) self.bad_urls = bad_urls if bad_urls is not None else [] self.bad_files = bad_files if bad_files is not None else [] def check_download_cache(pkgname='astropy'): """Do a consistency check on the cache. .. note:: Since v5.0, this function no longer returns anything. 
    Because the cache is shared by all versions of ``astropy`` in all
    virtualenvs run by your user, possibly concurrently, it could accumulate
    problems. This could lead to hard-to-debug problems or wasted space. This
    function detects a number of incorrect conditions, including nonexistent
    files that are indexed, files that are indexed but in the wrong place,
    and, if you request it, files whose content does not match the hash that
    is indexed.

    This function also flags non-indexed files. A few will be associated with
    the shelve object; their exact names depend on the backend used but will
    probably be based on ``urlmap``. The presence of other files probably
    indicates that something has gone wrong and inaccessible files have
    accumulated in the cache. These can be removed with
    :func:`clear_download_cache`, either passing the filename returned here,
    or with no arguments to empty the entire cache and return it to a
    reasonable, if empty, state.

    Parameters
    ----------
    pkgname : str, optional
        The package name to use to locate the download cache, i.e., for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Raises
    ------
    `~astropy.utils.data.CacheDamaged`
        To indicate a problem with the cache contents; the exception contains
        a ``.bad_files`` attribute containing a set of filenames to allow the
        user to use :func:`clear_download_cache` to remove the offending
        items.
    OSError, RuntimeError
        To indicate some problem with the cache structure. This may need a
        full :func:`clear_download_cache` to resolve, or may indicate some
        kind of misconfiguration.
    """
    bad_files = set()
    messages = set()
    dldir = _get_download_cache_loc(pkgname=pkgname)
    with os.scandir(dldir) as it:
        for entry in it:
            f = os.path.abspath(os.path.join(dldir, entry.name))
            if entry.name.startswith("rmtree-"):
                if f not in _tempfilestodel:
                    bad_files.add(f)
                    messages.add(f"Cache entry {entry.name} not scheduled for deletion")
            elif entry.is_dir():
                for sf in os.listdir(f):
                    if sf in ['url', 'contents']:
                        continue
                    sf = os.path.join(f, sf)
                    bad_files.add(sf)
                    messages.add(f"Unexpected file {sf}")
                urlf = os.path.join(f, "url")
                url = None
                if not os.path.isfile(urlf):
                    bad_files.add(urlf)
                    messages.add(f"Problem with URL file {urlf}")
                else:
                    url = get_file_contents(urlf, encoding="utf-8")
                    if not _is_url(url):
                        bad_files.add(f)
                        messages.add(f"Malformed URL: {url}")
                    else:
                        hashname = _url_to_dirname(url)
                        if entry.name != hashname:
                            bad_files.add(f)
                            messages.add(f"URL hashes to {hashname} but is stored in {entry.name}")
                if not os.path.isfile(os.path.join(f, "contents")):
                    bad_files.add(f)
                    if url is None:
                        messages.add(f"Hash {entry.name} is missing contents")
                    else:
                        messages.add(f"URL {url} with hash {entry.name} is missing contents")
            else:
                bad_files.add(f)
                messages.add(f"Left-over non-directory {f} in cache")
    if bad_files:
        raise CacheDamaged("\n".join(messages), bad_files=bad_files)


@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
    """Temporary directory context manager

    This will not raise an exception if the temporary directory goes away
    before it's supposed to be deleted. Specifically, what is deleted will
    be the directory *name* produced; if no such directory exists, no
    exception will be raised.

    It would be safer to delete it only if it's really the same directory
    - checked by file descriptor - and if it's still called the same thing.
    But that opens a platform-specific can of worms.

It would also be more robust to use ExitStack and TemporaryDirectory, which is more aggressive about removing readonly things. """ d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir) try: yield d finally: try: shutil.rmtree(d) except OSError: pass def _rmtree(path, replace=None): """More-atomic rmtree. Ignores missing directory.""" with TemporaryDirectory(prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))) as d: try: os.rename(path, os.path.join(d, "to-zap")) except FileNotFoundError: pass except PermissionError: warn(CacheMissingWarning( f"Unable to remove directory {path} because a file in it " f"is in use and you are on Windows", path)) raise if replace is not None: try: os.rename(replace, path) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass else: raise def import_file_to_cache(url_key, filename, remove_original=False, pkgname='astropy', *, replace=True): """Import the on-disk file specified by filename to the cache. The provided ``url_key`` will be the name used in the cache. The file should contain the contents of this URL, at least notionally (the URL may be temporarily or permanently unavailable). It is using ``url_key`` that users will request these contents from the cache. See :func:`download_file` for details. If ``url_key`` already exists in the cache, it will be updated to point to these imported contents, and its old contents will be deleted from the cache. Parameters ---------- url_key : str The key to index the file under. This should probably be the URL where the file was located, though if you obtained it from a mirror you should use the URL of the primary location. filename : str The file whose contents you want to import. remove_original : bool Whether to remove the original file (``filename``) once import is complete. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. replace : boolean, optional Whether or not to replace an existing object in the cache, if one exists. If replacement is not requested but the object exists, silently pass. """ cache_dir = _get_download_cache_loc(pkgname=pkgname) cache_dirname = _url_to_dirname(url_key) local_dirname = os.path.join(cache_dir, cache_dirname) local_filename = os.path.join(local_dirname, "contents") with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir: temp_filename = os.path.join(temp_dir, "contents") # Make sure we're on the same filesystem # This will raise an exception if the url_key doesn't turn into a valid filename shutil.copy(filename, temp_filename) with open(os.path.join(temp_dir, "url"), "wt", encoding="utf-8") as f: f.write(url_key) if replace: _rmtree(local_dirname, replace=temp_dir) else: try: os.rename(temp_dir, local_dirname) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass else: raise if remove_original: os.remove(filename) return os.path.abspath(local_filename) def get_cached_urls(pkgname='astropy'): """ Get the list of URLs in the cache. Especially useful for looking up what files are stored in your cache when you don't have internet access. The listed URLs are the keys programs should use to access the file contents, but those contents may have actually been obtained from a mirror. See `~download_file` for details. 

    Parameters
    ----------
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Returns
    -------
    cached_urls : list
        List of cached URLs.

    See Also
    --------
    cache_contents : obtain a dictionary listing everything in the cache
    """
    return sorted(cache_contents(pkgname=pkgname).keys())


def cache_contents(pkgname='astropy'):
    """Obtain a dict mapping cached URLs to filenames.

    This dictionary is a read-only snapshot of the state of the cache when
    this function was called. If other processes are actively working with
    the cache, it is possible for them to delete files that are listed in
    this dictionary. Use with some caution if you are working on a system
    that is busy with many running astropy processes, although the same
    issues apply to most functions in this module.
    """
    r = {}
    try:
        dldir = _get_download_cache_loc(pkgname=pkgname)
    except OSError:
        return _NOTHING
    with os.scandir(dldir) as it:
        for entry in it:
            if entry.is_dir():
                url = get_file_contents(os.path.join(dldir, entry.name, "url"),
                                        encoding="utf-8")
                r[url] = os.path.abspath(os.path.join(dldir, entry.name,
                                                      "contents"))
    return ReadOnlyDict(r)


def export_download_cache(filename_or_obj, urls=None, overwrite=False,
                          pkgname='astropy'):
    """Exports the cache contents as a ZIP file.

    Parameters
    ----------
    filename_or_obj : str or file-like
        Where to put the created ZIP file. Must be something the zipfile
        module can write to.
    urls : iterable of str or None
        The URLs to include in the exported cache. The default is all
        URLs currently in the cache. If a URL is included in this list
        but is not currently in the cache, a KeyError will be raised.
        To ensure that all are in the cache use `~download_file`
        or `~download_files_in_parallel`.
    overwrite : bool, optional
        If filename_or_obj is a filename that exists, it will only be
        overwritten if this is True.
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    See Also
    --------
    import_download_cache : import the contents of such a ZIP file
    import_file_to_cache : import a single file directly
    """
    if urls is None:
        urls = get_cached_urls(pkgname)
    with zipfile.ZipFile(filename_or_obj, 'w' if overwrite else 'x') as z:
        for u in urls:
            fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
            # Do not use os.path.join because ZIP files want
            # "/" on all platforms
            z_fn = urllib.parse.quote(u, safe="")
            z.write(fn, z_fn)


def import_download_cache(filename_or_obj, urls=None, update_cache=False,
                          pkgname='astropy'):
    """Imports the contents of a ZIP file into the cache.

    Each member of the ZIP file should be named by a quoted version of the
    URL whose contents it stores. These names are decoded with
    :func:`~urllib.parse.unquote`.

    Parameters
    ----------
    filename_or_obj : str or file-like
        Where the stored ZIP file is. Must be something the
        :mod:`~zipfile` module can read from.
    urls : set of str or list of str or None
        The URLs to import from the ZIP file. The default is all
        URLs in the file.
    update_cache : bool, optional
        If True, any entry in the ZIP file will overwrite the value in the
        cache; if False, leave untouched any entry already in the cache.
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    See Also
    --------
    export_download_cache : export the contents of the cache to such a ZIP file
    import_file_to_cache : import a single file directly
    """
    with zipfile.ZipFile(filename_or_obj, 'r') as z, TemporaryDirectory() as d:
        for i, zf in enumerate(z.infolist()):
            url = urllib.parse.unquote(zf.filename)
            # FIXME(aarchiba): do we want some kind of validation on this URL?
            # urllib.parse might do something sensible...but what URLs might
            # they have?
            # is_url in this file is probably a good check, not just here
            # but throughout this file.
            if urls is not None and url not in urls:
                continue
            if not update_cache and is_url_in_cache(url, pkgname=pkgname):
                continue
            f_temp_name = os.path.join(d, str(i))
            with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
                block = f_zip.read(conf.download_block_size)
                while block:
                    f_temp.write(block)
                    block = f_zip.read(conf.download_block_size)
            import_file_to_cache(url, f_temp_name,
                                 remove_original=True,
                                 pkgname=pkgname)
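

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the public API): how the cache machinery
# above fits together. The URL below is a hypothetical placeholder, not a
# real data product, and this helper is never called by the module itself.

def _cache_usage_example():  # pragma: no cover
    """Hedged usage sketch for the download-cache functions defined above."""
    url = 'https://example.org/some/data/file.dat'  # hypothetical URL
    # Download (or fetch from the cache) and get a local filename; a list of
    # mirrors could be passed via ``sources`` to avoid an unreachable server.
    filename = download_file(url, cache=True)
    # The cache is keyed by URL; ``cache_contents`` maps URLs to local paths.
    assert cache_contents()[url] == filename
    # The cache (or a subset of its URLs) round-trips through a ZIP archive.
    export_download_cache('cache_backup.zip', urls=[url], overwrite=True)
    clear_download_cache(url)
    import_download_cache('cache_backup.zip')
    return is_url_in_cache(url)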
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os from os.path import join import os.path import shutil import sys from collections import defaultdict from setuptools import Extension from setuptools.dep_util import newer_group import numpy from extension_helpers import import_file, write_if_different, get_compiler, pkg_config WCSROOT = os.path.relpath(os.path.dirname(__file__)) WCSVERSION = "7.9" def b(s): return s.encode('ascii') def string_escape(s): s = s.decode('ascii').encode('ascii', 'backslashreplace') s = s.replace(b'\n', b'\\n') s = s.replace(b'\0', b'\\0') return s.decode('ascii') def determine_64_bit_int(): """ The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them. """ try: try: import ctypes except ImportError: raise ValueError() if ctypes.sizeof(ctypes.c_longlong) == 8: return "long long int" elif ctypes.sizeof(ctypes.c_long) == 8: return "long int" elif ctypes.sizeof(ctypes.c_int) == 8: return "int" else: raise ValueError() except ValueError: return "long long int" def write_wcsconfig_h(paths): """ Writes out the wcsconfig.h header with local configuration. """ h_file = io.StringIO() h_file.write(""" /* The bundled version has WCSLIB_VERSION */ #define HAVE_WCSLIB_VERSION 1 /* WCSLIB library version number. */ #define WCSLIB_VERSION {} /* 64-bit integer data type. */ #define WCSLIB_INT64 {} /* Windows needs some other defines to prevent inclusion of wcsset() which conflicts with wcslib's wcsset(). These need to be set on code that *uses* astropy.wcs, in addition to astropy.wcs itself. */ #if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__) #ifndef YY_NO_UNISTD_H #define YY_NO_UNISTD_H #endif #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _NO_OLDNAMES #define _NO_OLDNAMES #endif #ifndef NO_OLDNAMES #define NO_OLDNAMES #endif #ifndef __STDC__ #define __STDC__ 1 #endif #endif """.format(WCSVERSION, determine_64_bit_int())) content = h_file.getvalue().encode('ascii') for path in paths: write_if_different(path, content) ###################################################################### # GENERATE DOCSTRINGS IN C def generate_c_docstrings(): docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py')) docstrings = docstrings.__dict__ keys = [ key for key, val in docstrings.items() if not key.startswith('__') and isinstance(val, str)] keys.sort() docs = {} for key in keys: docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0' h_file = io.StringIO() h_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py */ #ifndef __DOCSTRINGS_H__ #define __DOCSTRINGS_H__ """) for key in keys: val = docs[key] h_file.write(f'extern char doc_{key}[{len(val)}];\n') h_file.write("\n#endif\n\n") write_if_different( join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'), h_file.getvalue().encode('utf-8')) c_file = io.StringIO() c_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py The weirdness here with strncpy is because some C compilers, notably MSVC, do not support string literals greater than 256 characters. 
*/ #include <string.h> #include "astropy_wcs/docstrings.h" """) for key in keys: val = docs[key] c_file.write(f'char doc_{key}[{len(val)}] = {{\n') for i in range(0, len(val), 12): section = val[i:i+12] c_file.write(' ') c_file.write(''.join(f'0x{x:02x}, ' for x in section)) c_file.write('\n') c_file.write(" };\n\n") write_if_different( join(WCSROOT, 'src', 'docstrings.c'), c_file.getvalue().encode('utf-8')) def get_wcslib_cfg(cfg, wcslib_files, include_paths): debug = '--debug' in sys.argv cfg['include_dirs'].append(numpy.get_include()) cfg['define_macros'].extend([ ('ECHO', None), ('WCSTRIG_MACRO', None), ('ASTROPY_WCS_BUILD', None), ('_GNU_SOURCE', None)]) if ((int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))) and not sys.platform == 'win32'): wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h') if os.path.exists(wcsconfig_h_path): os.unlink(wcsconfig_h_path) for k, v in pkg_config(['wcslib'], ['wcs']).items(): cfg[k].extend(v) else: write_wcsconfig_h(include_paths) wcslib_path = join("cextern", "wcslib") # Path to wcslib wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files) cfg['include_dirs'].append(wcslib_cpath) if debug: cfg['define_macros'].append(('DEBUG', None)) cfg['undef_macros'].append('NDEBUG') if (not sys.platform.startswith('sun') and not sys.platform == 'win32'): cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"]) else: # Define ECHO as nothing to prevent spurious newlines from # printing within the libwcs parser cfg['define_macros'].append(('NDEBUG', None)) cfg['undef_macros'].append('DEBUG') if sys.platform == 'win32': # These are written into wcsconfig.h, but that file is not # used by all parts of wcslib. 
cfg['define_macros'].extend([ ('YY_NO_UNISTD_H', None), ('_CRT_SECURE_NO_WARNINGS', None), ('_NO_OLDNAMES', None), # for mingw32 ('NO_OLDNAMES', None), # for mingw64 ('__STDC__', None) # for MSVC ]) if sys.platform.startswith('linux'): cfg['define_macros'].append(('HAVE_SINCOS', None)) # For 4.7+ enable C99 syntax in older compilers (need 'gnu99' std for gcc) if get_compiler() == 'unix': cfg['extra_compile_args'].extend(['-std=gnu99']) # Squelch a few compilation warnings in WCSLIB if get_compiler() in ('unix', 'mingw32'): if not debug: cfg['extra_compile_args'].extend([ '-Wno-strict-prototypes', '-Wno-unused-function', '-Wno-unused-value', '-Wno-uninitialized']) def get_extensions(): generate_c_docstrings() ###################################################################### # DISTUTILS SETUP cfg = defaultdict(list) wcslib_files = [ # List of wcslib files to compile 'flexed/wcsbth.c', 'flexed/wcspih.c', 'flexed/wcsulex.c', 'flexed/wcsutrn.c', 'cel.c', 'dis.c', 'lin.c', 'log.c', 'prj.c', 'spc.c', 'sph.c', 'spx.c', 'tab.c', 'wcs.c', 'wcserr.c', 'wcsfix.c', 'wcshdr.c', 'wcsprintf.c', 'wcsunits.c', 'wcsutil.c' ] wcslib_config_paths = [ join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'), join(WCSROOT, 'include', 'wcsconfig.h') ] get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(join(WCSROOT, "include")) astropy_wcs_files = [ # List of astropy.wcs files to compile 'distortion.c', 'distortion_wrap.c', 'docstrings.c', 'pipeline.c', 'pyutil.c', 'astropy_wcs.c', 'astropy_wcs_api.c', 'sip.c', 'sip_wrap.c', 'str_list_proxy.c', 'unit_list_proxy.c', 'util.c', 'wcslib_wrap.c', 'wcslib_auxprm_wrap.c', 'wcslib_prjprm_wrap.c', 'wcslib_celprm_wrap.c', 'wcslib_tabprm_wrap.c', 'wcslib_wtbarr_wrap.c' ] cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) # Copy over header files from WCSLIB into the installed version of Astropy # so that other Python packages can write extensions that link to it. We # do the copying here then include the data in [options.package_data] in # the setup.cfg file wcslib_headers = [ 'cel.h', 'lin.h', 'prj.h', 'spc.h', 'spx.h', 'tab.h', 'wcs.h', 'wcserr.h', 'wcsmath.h', 'wcsprintf.h', ] if not (int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))): for header in wcslib_headers: source = join('cextern', 'wcslib', 'C', header) dest = join('astropy', 'wcs', 'include', 'wcslib', header) if newer_group([source], dest, 'newer'): shutil.copy(source, dest) return [Extension('astropy.wcs._wcs', **cfg)]
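

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the build): what generate_c_docstrings()
# above emits for a single docstring. This helper is hypothetical and only
# previews the encoding; the real build writes docstrings.h/docstrings.c.

def _preview_docstring_c(key, doc):  # pragma: no cover
    """Return the C array source generated for one (name, docstring) pair."""
    val = doc.encode('utf8').lstrip() + b'\0'
    lines = [f'char doc_{key}[{len(val)}] = {{']
    # Emit twelve bytes per row, mirroring the writer above, because some C
    # compilers (notably MSVC) reject string literals longer than 256 chars.
    for i in range(0, len(val), 12):
        section = val[i:i + 12]
        lines.append('  ' + ''.join(f'0x{x:02x}, ' for x in section))
    lines.append('  };')
    return '\n'.join(lines)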
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the CGS units. They are also available in the top-level `astropy.units` namespace. """ from fractions import Fraction from . import si from .core import UnitBase, def_unit _ns = globals() def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False) g = si.g s = si.s C = si.C rad = si.rad sr = si.sr cd = si.cd K = si.K deg_C = si.deg_C mol = si.mol ########################################################################## # ACCELERATION def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True, doc="Gal: CGS unit of acceleration") ########################################################################## # ENERGY # Use CGS definition of erg def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True, doc="erg: CGS unit of energy") ########################################################################## # FORCE def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns, prefixes=True, doc="dyne: CGS unit of force") ########################################################################## # PRESSURE def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns, prefixes=True, doc="Barye: CGS unit of pressure") ########################################################################## # DYNAMIC VISCOSITY def_unit(['P', 'poise'], g / (cm * s), namespace=_ns, prefixes=True, doc="poise: CGS unit of dynamic viscosity") ########################################################################## # KINEMATIC VISCOSITY def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns, prefixes=True, doc="stokes: CGS unit of kinematic viscosity") ########################################################################## # WAVENUMBER def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns, prefixes=True, doc="kayser: CGS unit of wavenumber") ########################################################################### # ELECTRICAL def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m, namespace=_ns, prefixes=True, doc="Debye: CGS unit of electric dipole moment") def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'], g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1, namespace=_ns, doc='Franklin: CGS (ESU) unit of charge') def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns, doc='statampere: CGS (ESU) unit of current') def_unit(['Bi', 'Biot', 'abA', 'abampere'], g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns, doc='Biot: CGS (EMU) unit of current') def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns, doc='abcoulomb: CGS (EMU) of charge') ########################################################################### # MAGNETIC def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True, doc="Gauss: CGS unit for magnetic field") def_unit(['Mx', 'Maxwell', 'maxwell'], 1e-8 * si.Wb, namespace=_ns, doc="Maxwell: CGS unit for magnetic flux") ########################################################################### # BASES bases = set([cm, g, s, rad, cd, K, mol]) ########################################################################### # CLEANUP del UnitBase del def_unit del si del Fraction ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
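

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): converting between the CGS
# units defined above and their SI equivalents through the Quantity API.
# The numerical expectations follow directly from the definitions above.

def _cgs_conversion_example():  # pragma: no cover
    """Hedged usage sketch for the CGS units defined in this module."""
    from astropy import units as u
    # 1 Ba = 1 g / (cm s^2) = 0.1 Pa, so 1e6 Ba is 1e5 Pa (~1 atmosphere).
    pressure = (1e6 * u.Ba).to(u.Pa)
    # 1 G = 1e-4 T by the definition given above.
    field = (0.5 * u.G).to(u.T)
    return pressure, field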
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains convenience functions for retrieving solar system ephemerides from jplephem. """ from urllib.parse import urlparse import os.path import numpy as np import erfa from .sky_coordinate import SkyCoord from astropy.utils.data import download_file from astropy.utils.decorators import classproperty, deprecated from astropy.utils.state import ScienceState from astropy.utils import indent from astropy import units as u from astropy.constants import c as speed_of_light from .representation import CartesianRepresentation, CartesianDifferential from .builtin_frames import GCRS, ICRS, ITRS, TETE from .builtin_frames.utils import get_jd12 __all__ = ["get_body", "get_moon", "get_body_barycentric", "get_body_barycentric_posvel", "solar_system_ephemeris"] DEFAULT_JPL_EPHEMERIS = 'de430' """List of kernel pairs needed to calculate positions of a given object.""" BODY_NAME_TO_KERNEL_SPEC = { 'sun': [(0, 10)], 'mercury': [(0, 1), (1, 199)], 'venus': [(0, 2), (2, 299)], 'earth-moon-barycenter': [(0, 3)], 'earth': [(0, 3), (3, 399)], 'moon': [(0, 3), (3, 301)], 'mars': [(0, 4)], 'jupiter': [(0, 5)], 'saturn': [(0, 6)], 'uranus': [(0, 7)], 'neptune': [(0, 8)], 'pluto': [(0, 9)], } """Indices to the plan94 routine for the given object.""" PLAN94_BODY_NAME_TO_PLANET_INDEX = { 'mercury': 1, 'venus': 2, 'earth-moon-barycenter': 3, 'mars': 4, 'jupiter': 5, 'saturn': 6, 'uranus': 7, 'neptune': 8, } _EPHEMERIS_NOTE = """ You can either give an explicit ephemeris or use a default, which is normally a built-in ephemeris that does not require ephemeris files. To change the default to be the JPL ephemeris:: >>> from astropy.coordinates import solar_system_ephemeris >>> solar_system_ephemeris.set('jpl') # doctest: +SKIP Use of any JPL ephemeris requires the jplephem package (https://pypi.org/project/jplephem/). If needed, the ephemeris file will be downloaded (and cached). One can check which bodies are covered by a given ephemeris using:: >>> solar_system_ephemeris.bodies ('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune') """[1:-1] class solar_system_ephemeris(ScienceState): """Default ephemerides for calculating positions of Solar-System bodies. This can be one of the following:: - 'builtin': polynomial approximations to the orbital elements. - 'de430', 'de432s', 'de440', 'de440s': short-cuts for recent JPL dynamical models. - 'jpl': Alias for the default JPL ephemeris (currently, 'de430'). - URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format. - PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format. - `None`: Ensure an Exception is raised without an explicit ephemeris. The default is 'builtin', which uses the ``epv00`` and ``plan94`` routines from the ``erfa`` implementation of the Standards Of Fundamental Astronomy library. Notes ----- Any file required will be downloaded (and cached) when the state is set. The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is ~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is ~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440 and de440s). Older versions of the JPL ephemerides (such as the widely used de200) can be used via their URL [3]_. .. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt .. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt .. 
[3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/ """ _value = 'builtin' _kernel = None @classmethod def validate(cls, value): # make no changes if value is None if value is None: return cls._value # Set up Kernel; if the file is not in cache, this will download it. cls.get_kernel(value) return value @classmethod def get_kernel(cls, value): # ScienceState only ensures the `_value` attribute is up to date, # so we need to be sure any kernel returned is consistent. if cls._kernel is None or cls._kernel.origin != value: if cls._kernel is not None: cls._kernel.daf.file.close() cls._kernel = None kernel = _get_kernel(value) if kernel is not None: kernel.origin = value cls._kernel = kernel return cls._kernel @classproperty def kernel(cls): return cls.get_kernel(cls._value) @classproperty def bodies(cls): if cls._value is None: return None if cls._value.lower() == 'builtin': return (('earth', 'sun', 'moon') + tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys())) else: return tuple(BODY_NAME_TO_KERNEL_SPEC.keys()) def _get_kernel(value): """ Try importing jplephem, download/retrieve from cache the Satellite Planet Kernel corresponding to the given ephemeris. """ if value is None or value.lower() == 'builtin': return None try: from jplephem.spk import SPK except ImportError: raise ImportError("Solar system JPL ephemeris calculations require " "the jplephem package " "(https://pypi.org/project/jplephem/)") if value.lower() == 'jpl': value = DEFAULT_JPL_EPHEMERIS if value.lower() in ('de430', 'de432s', 'de440', 'de440s'): value = ('https://naif.jpl.nasa.gov/pub/naif/generic_kernels' '/spk/planets/{:s}.bsp'.format(value.lower())) elif os.path.isfile(value): return SPK.open(value) else: try: urlparse(value) except Exception: raise ValueError('{} was not one of the standard strings and ' 'could not be parsed as a file path or URL'.format(value)) return SPK.open(download_file(value, cache=True)) def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True): """Calculate the barycentric position (and velocity) of a solar system body. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` get_velocity : bool, optional Whether or not to calculate the velocity as well as the position. Returns ------- position : `~astropy.coordinates.CartesianRepresentation` or tuple Barycentric (ICRS) position or tuple of position and velocity. Notes ----- Whether or not velocities are calculated makes little difference for the built-in ephemerides, but for most JPL ephemeris files, the execution time roughly doubles. """ # If the ephemeris is to be taken from solar_system_ephemeris, or the one # it already contains, use the kernel there. Otherwise, open the ephemeris, # possibly downloading it, but make sure the file is closed at the end. 
    default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
    kernel = None
    try:
        if default_kernel:
            if solar_system_ephemeris.get() is None:
                raise ValueError(_EPHEMERIS_NOTE)
            kernel = solar_system_ephemeris.kernel
        else:
            kernel = _get_kernel(ephemeris)

        jd1, jd2 = get_jd12(time, 'tdb')
        if kernel is None:
            body = body.lower()
            earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
            if body == 'earth':
                body_pv_bary = earth_pv_bary

            elif body == 'moon':
                # The moon98 documentation notes that it takes TT, but that
                # TDB leads to errors smaller than the uncertainties in the
                # algorithm.
                # moon98 returns the astrometric position relative to the
                # Earth.
                moon_pv_geo = erfa.moon98(jd1, jd2)
                body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
            else:
                sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
                if body == 'sun':
                    body_pv_bary = sun_pv_bary
                else:
                    try:
                        body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
                    except KeyError:
                        raise KeyError("{}'s position and velocity cannot be "
                                       "calculated with the '{}' ephemeris."
                                       .format(body, ephemeris))
                    body_pv_helio = erfa.plan94(jd1, jd2, body_index)
                    body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)

            body_pos_bary = CartesianRepresentation(
                body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(
                    body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
                    copy=False)

        else:
            if isinstance(body, str):
                # Look up kernel chain for JPL ephemeris, based on name
                try:
                    kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
                except KeyError:
                    raise KeyError("{}'s position cannot be calculated with "
                                   "the {} ephemeris.".format(body, ephemeris))
            else:
                # otherwise, assume the user knows what they're doing and
                # intentionally passed in a kernel chain
                kernel_spec = body

            # jplephem cannot handle multi-D arrays, so convert to 1D here.
            jd1_shape = getattr(jd1, 'shape', ())
            if len(jd1_shape) > 1:
                jd1, jd2 = jd1.ravel(), jd2.ravel()
            # Note that we use the new jd1.shape here to create a 1D result
            # array.  It is reshaped below.
            body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
                                        getattr(jd1, 'shape', ()))
            for pair in kernel_spec:
                spk = kernel[pair]
                if spk.data_type == 3:
                    # Type 3 kernels contain both position and velocity.
                    posvel = spk.compute(jd1, jd2)
                    if get_velocity:
                        body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
                    else:
                        # Only the first three components are the position.
                        body_posvel_bary[0] += posvel[:3]
                else:
                    # spk.generate first yields the position and then the
                    # derivative. If no velocities are desired,
                    # body_posvel_bary has only one element and thus the loop
                    # ends after a single iteration, avoiding the velocity
                    # calculation.
                    for body_p_or_v, p_or_v in zip(body_posvel_bary,
                                                   spk.generate(jd1, jd2)):
                        body_p_or_v += p_or_v

            body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
            body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
                                                    unit=u.km, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
                                                        unit=u.km/u.day,
                                                        copy=False)

        return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary

    finally:
        if not default_kernel and kernel is not None:
            kernel.daf.file.close()


def get_body_barycentric_posvel(body, time, ephemeris=None):
    """Calculate the barycentric position and velocity of a solar system body.

    Parameters
    ----------
    body : str or list of tuple
        The solar system body for which to calculate positions.  Can
        also be a kernel specifier (list of 2-tuples) if the ``ephemeris``
        is a JPL kernel.
    time : `~astropy.time.Time`
        Time of observation.
    ephemeris : str, optional
        Ephemeris to use.
By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` Tuple of barycentric (ICRS) position and velocity. See Also -------- get_body_barycentric : to calculate position only. This is faster by about a factor two for JPL kernels, but has no speed advantage for the built-in ephemeris. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris) def get_body_barycentric(body, time, ephemeris=None): """Calculate the barycentric position of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) position of the body in cartesian coordinates See Also -------- get_body_barycentric_posvel : to calculate both position and velocity. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False) def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None): """Calculate the apparent position of body ``body`` relative to Earth. This corrects for the light-travel time to the object. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``~astropy.coordinates.solar_system_ephemeris.set`` obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional The GCRS position of the observer Returns ------- cartesian_position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) apparent position of the body in cartesian coordinates Notes ----- {_EPHEMERIS_NOTE} """ if ephemeris is None: ephemeris = solar_system_ephemeris.get() # Calculate position given approximate light travel time. delta_light_travel_time = 20. * u.s emitted_time = time light_travel_time = 0. * u.s earth_loc = get_body_barycentric('earth', time, ephemeris) if obsgeoloc is not None: earth_loc += obsgeoloc while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_loc = get_body_barycentric(body, emitted_time, ephemeris) earth_distance = (body_loc - earth_loc).norm() delta_light_travel_time = (light_travel_time - earth_distance/speed_of_light) light_travel_time = earth_distance/speed_of_light emitted_time = time - light_travel_time return get_body_barycentric(body, emitted_time, ephemeris) def get_body(body, time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. location : `~astropy.coordinates.EarthLocation`, optional Location of observer on the Earth. If not given, will be taken from ``time`` (if not present, a geocentric observer will be assumed). 
ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the body Notes ----- The coordinate returned is the apparent position, which is the position of the body at time *t* minus the light travel time from the *body* to the observing *location*. {_EPHEMERIS_NOTE} """ if location is None: location = time.location if location is not None: obsgeoloc, obsgeovel = location.get_gcrs_posvel(time) else: obsgeoloc, obsgeovel = None, None cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc) icrs = ICRS(cartrep) gcrs = icrs.transform_to(GCRS(obstime=time, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)) return SkyCoord(gcrs) def get_moon(time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- time : `~astropy.time.Time` Time of observation location : `~astropy.coordinates.EarthLocation` Location of observer on the Earth. If none is supplied, taken from ``time`` (if not present, a geocentric observer will be assumed). ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). Returns ------- skycoord : `~astropy.coordinates.SkyCoord` GCRS Coordinate for the Moon Notes ----- The coordinate returned is the apparent position, which is the position of the moon at time *t* minus the light travel time from the moon to the observing *location*. {_EPHEMERIS_NOTE} """ return get_body('moon', time, location=location, ephemeris=ephemeris) # Add note about the ephemeris choices to the docstrings of relevant functions. # Note: sadly, one cannot use f-strings for docstrings, so we format explicitly. for f in [f for f in locals().values() if callable(f) and f.__doc__ is not None and '{_EPHEMERIS_NOTE}' in f.__doc__]: f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:]) deprecation_msg = """ The use of _apparent_position_in_true_coordinates is deprecated because astropy now implements a True Equator True Equinox Frame (TETE), which should be used instead. """ @deprecated('4.2', deprecation_msg) def _apparent_position_in_true_coordinates(skycoord): """ Convert Skycoord in GCRS frame into one in which RA and Dec are defined w.r.t to the true equinox and poles of the Earth """ location = getattr(skycoord, 'location', None) if location is None: gcrs_rep = skycoord.obsgeoloc.with_differentials( {'s': CartesianDifferential.from_cartesian(skycoord.obsgeovel)}) location = (GCRS(gcrs_rep, obstime=skycoord.obstime) .transform_to(ITRS(obstime=skycoord.obstime)) .earth_location) tete_frame = TETE(obstime=skycoord.obstime, location=location) return skycoord.transform_to(tete_frame)
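
# A minimal end-to-end sketch of the public API in this module (hedged: the
# observing site and date are illustrative, and non-'builtin' ephemerides
# trigger a kernel download on first use):
#
#     from astropy.time import Time
#     from astropy.coordinates import (EarthLocation, solar_system_ephemeris,
#                                      get_body)
#
#     t = Time('2021-06-21 12:00')
#     site = EarthLocation.of_site('greenwich')
#     with solar_system_ephemeris.set('builtin'):
#         jupiter = get_body('jupiter', t, location=site)  # apparent GCRS SkyCoord
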
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities used for constructing and inspecting rotation matrices.
"""
from functools import reduce

import numpy as np

from astropy import units as u

from .angles import Angle


def matrix_product(*matrices):
    """Matrix multiply all arguments together.

    Arguments should have dimension 2 or larger. Larger dimensional objects
    are interpreted as stacks of matrices residing in the last two dimensions.

    This function mostly exists for readability: using `~numpy.matmul`
    directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even
    better readability, one might consider using `~numpy.matrix` for the
    arguments (so that one could write ``m1 * m2 * m3``), but then it is not
    possible to handle stacks of matrices. Once only python >=3.5 is
    supported, this function can be replaced by ``m1 @ m2 @ m3``.
    """
    return reduce(np.matmul, matrices)


def matrix_transpose(matrix):
    """Transpose a matrix or stack of matrices by swapping the last two axes.

    This function mostly exists for readability; seeing ``.swapaxes(-2, -1)``
    it is not that obvious that one does a transpose.

    Note that one cannot use `~numpy.ndarray.T`, as this transposes all axes
    and thus does not work for stacks of matrices.
    """
    return matrix.swapaxes(-2, -1)


def rotation_matrix(angle, axis='z', unit=None):
    """
    Generate matrices for rotation by some angle around some axis.

    Parameters
    ----------
    angle : angle-like
        The amount of rotation the matrices should represent.  Can be an
        array.
    axis : str or array-like
        Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
        rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
        counterclockwise looking down the + axis (e.g. positive rotations
        obey left-hand-rule).  If given as an array, the last dimension
        should be 3; it will be broadcast against ``angle``.
    unit : unit-like, optional
        If ``angle`` does not have associated units, they are in this
        unit.  If neither are provided, it is assumed to be degrees.

    Returns
    -------
    rmat : `~numpy.ndarray`
        A unitary rotation matrix.
    """
    if isinstance(angle, u.Quantity):
        angle = angle.to_value(u.radian)
    else:
        if unit is None:
            angle = np.deg2rad(angle)
        else:
            angle = u.Unit(unit).to(u.rad, angle)

    s = np.sin(angle)
    c = np.cos(angle)

    # use optimized implementations for x/y/z
    try:
        i = 'xyz'.index(axis)
    except TypeError:
        axis = np.asarray(axis)
        axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True))
        R = (axis[..., np.newaxis] * axis[..., np.newaxis, :] *
             (1. - c)[..., np.newaxis, np.newaxis])

        for i in range(0, 3):
            R[..., i, i] += c
            a1 = (i + 1) % 3
            a2 = (i + 2) % 3
            R[..., a1, a2] += axis[..., i] * s
            R[..., a2, a1] -= axis[..., i] * s

    else:
        a1 = (i + 1) % 3
        a2 = (i + 2) % 3
        R = np.zeros(getattr(angle, 'shape', ()) + (3, 3))
        R[..., i, i] = 1.
        R[..., a1, a1] = c
        R[..., a1, a2] = s
        R[..., a2, a1] = -s
        R[..., a2, a2] = c

    return R


def angle_axis(matrix):
    """
    Angle of rotation and rotation axis for a given rotation matrix.

    Parameters
    ----------
    matrix : array-like
        A 3 x 3 unitary rotation matrix (or stack of matrices).

    Returns
    -------
    angle : `~astropy.coordinates.Angle`
        The angle of rotation.
    axis : array
        The (normalized) axis of rotation (with last dimension 3).
""" m = np.asanyarray(matrix) if m.shape[-2:] != (3, 3): raise ValueError('matrix is not 3x3') axis = np.zeros(m.shape[:-1]) axis[..., 0] = m[..., 2, 1] - m[..., 1, 2] axis[..., 1] = m[..., 0, 2] - m[..., 2, 0] axis[..., 2] = m[..., 1, 0] - m[..., 0, 1] r = np.sqrt((axis * axis).sum(-1, keepdims=True)) angle = np.arctan2(r[..., 0], m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.) return Angle(angle, u.radian), -axis / r def is_O3(matrix): """Check whether a matrix is in the length-preserving group O(3). Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose`. Returns ------- is_o3 : bool or array of bool If the matrix has more than two axes, the O(3) check is performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. Notes ----- The orthogonal group O(3) preserves lengths, but is not guaranteed to keep orientations. Rotations and reflections are in this group. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ # matrix is in O(3) (rotations, proper and improper). I = np.identity(matrix.shape[-1]) is_o3 = np.all(np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=1e-15), axis=(-2, -1)) return is_o3 def is_rotation(matrix, allow_improper=False): """Check whether a matrix is a rotation, proper or improper. Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose` and `~numpy.linalg.det`. allow_improper : bool, optional Whether to restrict check to the SO(3), the group of proper rotations, or also allow improper rotations (with determinant -1). The default (False) is only SO(3). Returns ------- isrot : bool or array of bool If the matrix has more than two axes, the checks are performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. See Also -------- astopy.coordinates.matrix_utilities.is_O3 : For the less restrictive check that a matrix is in the group O(3). Notes ----- The group SO(3) is the rotation group. It is O(3), with determinant 1. Rotations with determinant -1 are improper rotations, combining both a rotation and a reflection. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ # matrix is in O(3). is_o3 = is_O3(matrix) # determinant checks for rotation (proper and improper) if allow_improper: # determinant can be +/- 1 is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0) else: # restrict to SO(3) is_det1 = np.isclose(np.linalg.det(matrix), 1.0) return is_o3 & is_det1
import re import copy import warnings import operator import numpy as np import erfa from astropy.utils.compat.misc import override__dir__ from astropy import units as u from astropy.constants import c as speed_of_light from astropy.utils.data_info import MixinInfo from astropy.utils import ShapedLikeNDArray from astropy.table import QTable from astropy.time import Time from astropy.utils.exceptions import AstropyUserWarning from .distances import Distance from .angles import Angle from .baseframe import (BaseCoordinateFrame, frame_transform_graph, GenericFrame) from .builtin_frames import ICRS, SkyOffsetFrame from .representation import (RadialDifferential, SphericalDifferential, SphericalRepresentation, UnitSphericalCosLatDifferential, UnitSphericalDifferential, UnitSphericalRepresentation) from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data, _parse_coordinate_data) __all__ = ['SkyCoord', 'SkyCoordInfo'] class SkyCoordInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attrs_from_parent = set(['unit']) # Unit is read-only _supports_indexing = False @staticmethod def default_format(val): repr_data = val.info._repr_data formats = ['{0.' + compname + '.value:}' for compname in repr_data.components] return ','.join(formats).format(repr_data) @property def unit(self): repr_data = self._repr_data unit = ','.join(str(getattr(repr_data, comp).unit) or 'None' for comp in repr_data.components) return unit @property def _repr_data(self): if self._parent is None: return None sc = self._parent if (issubclass(sc.representation_type, SphericalRepresentation) and isinstance(sc.data, UnitSphericalRepresentation)): repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True) else: repr_data = sc.represent_as(sc.representation_type, in_frame_units=True) return repr_data def _represent_as_dict(self): sc = self._parent attrs = list(sc.representation_component_names) # Don't output distance unless it's actually distance. if isinstance(sc.data, UnitSphericalRepresentation): attrs = attrs[:-1] diff = sc.data.differentials.get('s') if diff is not None: diff_attrs = list(sc.get_representation_component_names('s')) # Don't output proper motions if they haven't been specified. if isinstance(diff, RadialDifferential): diff_attrs = diff_attrs[2:] # Don't output radial velocity unless it's actually velocity. elif isinstance(diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)): diff_attrs = diff_attrs[:-1] attrs.extend(diff_attrs) attrs.extend(frame_transform_graph.frame_attributes.keys()) out = super()._represent_as_dict(attrs) out['representation_type'] = sc.representation_type.get_name() out['frame'] = sc.frame.name # Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None' # or None,None,m) and is not stored. The individual attributes have # units. return out def new_like(self, skycoords, length, metadata_conflicts='warn', name=None): """ Return a new SkyCoord instance which is consistent with the input SkyCoord objects ``skycoords`` and has ``length`` rows. Being "consistent" is defined as being able to set an item from one to each of the rest without any exception being raised. This is intended for creating a new SkyCoord instance whose elements can be set in-place for table operations like join or vstack. 
        This is used when a SkyCoord object is used as a mixin column in an
        astropy Table.

        The data values are not predictable and it is expected that the
        consumer of the object will fill in all values.

        Parameters
        ----------
        skycoords : list
            List of input SkyCoord objects
        length : int
            Length of the output skycoord object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output name (sets output skycoord.info.name)

        Returns
        -------
        skycoord : SkyCoord (or subclass)
            Instance of this class consistent with ``skycoords``

        """
        # Get merged info attributes like shape, dtype, format, description, etc.
        attrs = self.merge_cols_attributes(skycoords, metadata_conflicts, name,
                                           ('meta', 'description'))
        skycoord0 = skycoords[0]

        # Make a new SkyCoord object with the desired length and attributes
        # by using the _apply / __getitem__ machinery to effectively return
        # skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
        # attributes with the right shape.
        indexes = np.zeros(length, dtype=np.int64)
        out = skycoord0[indexes]

        # Use __setitem__ machinery to check for consistency of all skycoords
        for skycoord in skycoords[1:]:
            try:
                out[0] = skycoord[0]
            except Exception as err:
                raise ValueError('Input skycoords are inconsistent.') from err

        # Set (merged) info attributes
        for attr in ('name', 'meta', 'description'):
            if attr in attrs:
                setattr(out.info, attr, attrs[attr])

        return out


class SkyCoord(ShapedLikeNDArray):
    """High-level object providing a flexible interface for celestial
    coordinate representation, manipulation, and transformation between
    systems.

    The `SkyCoord` class accepts a wide variety of inputs for initialization.
    At a minimum these must provide one or more celestial coordinate values
    with unambiguous units.  Inputs may be scalars or lists/tuples/arrays,
    yielding scalar or array coordinates (can be checked via
    ``SkyCoord.isscalar``). Typically one also specifies the coordinate
    frame, though this is not required. The general pattern for spherical
    representations is::

      SkyCoord(COORD, [FRAME], keyword_args ...)
      SkyCoord(LON, LAT, [FRAME], keyword_args ...)
      SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
      SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)

    It is also possible to input coordinate values in other representations
    such as cartesian or cylindrical.  In this case one includes the keyword
    argument ``representation_type='cartesian'`` (for example) along with data
    in ``x``, ``y``, and ``z``.

    See also: https://docs.astropy.org/en/stable/coordinates/

    Examples
    --------
    The examples below illustrate common ways of initializing a `SkyCoord`
    object.  For a complete description of the allowed syntax see the
    full coordinates documentation.

    First some imports::

        >>> from astropy.coordinates import SkyCoord  # High-level coordinates
        >>> from astropy.coordinates import ICRS, Galactic, FK4, FK5  # Low-level frames
        >>> from astropy.coordinates import Angle, Latitude, Longitude  # Angles
        >>> import astropy.units as u

    The coordinate values and frame specification can now be provided using
    positional and keyword arguments::

        >>> c = SkyCoord(10, 20, unit="deg")  # defaults to ICRS frame
        >>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg")  # 3 coords

        >>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
        >>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")

        >>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic)  # Units from string
        >>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")

        >>> ra = Longitude([1, 2, 3], unit=u.deg)  # Could also use Angle
        >>> dec = np.array([4.5, 5.2, 6.3]) * u.deg  # Astropy Quantity
        >>> c = SkyCoord(ra, dec, frame='icrs')
        >>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')

        >>> c = FK4(1 * u.deg, 2 * u.deg)  # Uses defaults for obstime, equinox
        >>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965')  # Override defaults

        >>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
        ...              representation_type='cartesian')

        >>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])

    Velocity components (proper motions or radial velocities) can also be
    provided in a similar manner::

        >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)

        >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)

    As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
    class or the corresponding string alias.  The frame classes that are
    built in to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and
    `Galactic`.  The string aliases are simply lower-case versions of the
    class name, and allow for creating a `SkyCoord` object and transforming
    frames without explicitly importing the frame classes.

    Parameters
    ----------
    frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
        Type of coordinate frame this `SkyCoord` should represent. Defaults
        to ICRS if not given or given as None.
    unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
        Units for supplied coordinate values.
        If only one unit is supplied then it applies to all values.
        Note that passing only one unit might lead to unit conversion errors
        if the coordinate values are expected to have mixed physical meanings
        (e.g., angles and distances).
    obstime : time-like, optional
        Time(s) of observation.
    equinox : time-like, optional
        Coordinate frame equinox time.
    representation_type : str or Representation class
        Specifies the representation, e.g. 'spherical', 'cartesian', or
        'cylindrical'.  This affects the positional args and other keyword args
        which must correspond to the given representation.
    copy : bool, optional
        If `True` (default), a copy of any coordinate data is made.  This
        argument can only be passed in as a keyword argument.
    **keyword_args
        Other keyword arguments as applicable for user-defined coordinate
        frames.  Common options include:

        ra, dec : angle-like, optional
            RA and Dec for frames where ``ra`` and ``dec`` are keys in the
            frame's ``representation_component_names``, including `ICRS`,
            `FK5`, `FK4`, and `FK4NoETerms`.
        pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
            Proper motion components, in angle per time units.
        l, b : angle-like, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are keys
            in the frame's ``representation_component_names``, including the
            `Galactic` frame.
        pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
            Proper motion components in the `Galactic` frame, in angle per
            time units.
        x, y, z : float or `~astropy.units.Quantity` ['length'], optional
            Cartesian coordinates values
        u, v, w : float or `~astropy.units.Quantity` ['length'], optional
            Cartesian coordinates values for the Galactic frame.
        radial_velocity : `~astropy.units.Quantity` ['speed'], optional
            The component of the velocity along the line-of-sight (i.e., the
            radial direction), in velocity units.
    """

    # Declare that SkyCoord can be used as a Table column by defining the
    # info property.
    info = SkyCoordInfo()

    def __init__(self, *args, copy=True, **kwargs):
        # these are frame attributes set on this SkyCoord but *not* a part of
        # the frame object this SkyCoord contains
        self._extra_frameattr_names = set()

        # If all that is passed in is a frame instance that already has data,
        # we should bypass all of the parsing and logic below. This is here
        # to make this the fastest way to create a SkyCoord instance. Many of
        # the classmethods implemented for performance enhancements will use
        # this as the initialization path
        if (len(args) == 1 and len(kwargs) == 0
                and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):

            coords = args[0]
            if isinstance(coords, SkyCoord):
                self._extra_frameattr_names = coords._extra_frameattr_names
                self.info = coords.info

                # Copy over any extra frame attributes
                for attr_name in self._extra_frameattr_names:
                    # Setting it will also validate it.
                    setattr(self, attr_name, getattr(coords, attr_name))

                coords = coords.frame

            if not coords.has_data:
                raise ValueError('Cannot initialize from a coordinate frame '
                                 'instance without coordinate data')

            if copy:
                self._sky_coord_frame = coords.copy()
            else:
                self._sky_coord_frame = coords

        else:
            # Get the frame instance without coordinate data but with all frame
            # attributes set - these could either have been passed in with the
            # frame as an instance, or passed in as kwargs here
            frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)

            # Parse the args and kwargs to assemble a sanitized and validated
            # kwargs dict for initializing attributes for this object and for
            # creating the internal self._sky_coord_frame object
            args = list(args)  # Make it mutable
            skycoord_kwargs, components, info = _parse_coordinate_data(
                frame_cls(**frame_kwargs), args, kwargs)

            # In the above two parsing functions, these kwargs were identified
            # as valid frame attributes for *some* frame, but not the frame
            # that this SkyCoord will have. We keep these attributes as
            # special skycoord frame attributes:
            for attr in skycoord_kwargs:
                # Setting it will also validate it.
                setattr(self, attr, skycoord_kwargs[attr])

            if info is not None:
                self.info = info

            # Finally make the internal coordinate object.
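            # ``components`` holds the parsed representation data (e.g. ra/dec
            # quantities); merging it into ``frame_kwargs`` lets the frame
            # class perform the final validation and broadcasting.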
frame_kwargs.update(components) self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs) if not self._sky_coord_frame.has_data: raise ValueError('Cannot create a SkyCoord without data') @property def frame(self): return self._sky_coord_frame @property def representation_type(self): return self.frame.representation_type @representation_type.setter def representation_type(self, value): self.frame.representation_type = value # TODO: remove these in future @property def representation(self): return self.frame.representation @representation.setter def representation(self, value): self.frame.representation = value @property def shape(self): return self.frame.shape def __eq__(self, value): """Equality operator for SkyCoord This implements strict equality and requires that the frames are equivalent, extra frame attributes are equivalent, and that the representation data are exactly equal. """ if not isinstance(value, SkyCoord): return NotImplemented # Make sure that any extra frame attribute names are equivalent. for attr in self._extra_frameattr_names | value._extra_frameattr_names: if not self.frame._frameattr_equiv(getattr(self, attr), getattr(value, attr)): raise ValueError(f"cannot compare: extra frame attribute " f"'{attr}' is not equivalent " f"(perhaps compare the frames directly to avoid " f"this exception)") return self._sky_coord_frame == value._sky_coord_frame def __ne__(self, value): return np.logical_not(self == value) def _apply(self, method, *args, **kwargs): """Create a new instance, applying a method to the underlying data. In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays in the representation (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), as well as to any frame attributes that have a shape, with the results used to create a new instance. Internally, it is also used to apply functions to the above parts (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. """ def apply_method(value): if isinstance(value, ShapedLikeNDArray): return value._apply(method, *args, **kwargs) else: if callable(method): return method(value, *args, **kwargs) else: return getattr(value, method)(*args, **kwargs) # create a new but empty instance, and copy over stuff new = super().__new__(self.__class__) new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs) new._extra_frameattr_names = self._extra_frameattr_names.copy() for attr in self._extra_frameattr_names: value = getattr(self, attr) if getattr(value, 'shape', ()): value = apply_method(value) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. value = copy.copy(value) setattr(new, '_' + attr, value) # Copy other 'info' attr only if it has actually been defined. 
# See PR #3898 for further explanation and justification, along # with Quantity.__array_finalize__ if 'info' in self.__dict__: new.info = self.info return new def __setitem__(self, item, value): """Implement self[item] = value for SkyCoord The right hand ``value`` must be strictly consistent with self: - Identical class - Equivalent frames - Identical representation_types - Identical representation differentials keys - Identical frame attributes - Identical "extra" frame attributes (e.g. obstime for an ICRS coord) With these caveats the setitem ends up as effectively a setitem on the representation data. self.frame.data[item] = value.frame.data """ if self.__class__ is not value.__class__: raise TypeError(f'can only set from object of same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') # Make sure that any extra frame attribute names are equivalent. for attr in self._extra_frameattr_names | value._extra_frameattr_names: if not self.frame._frameattr_equiv(getattr(self, attr), getattr(value, attr)): raise ValueError(f'attribute {attr} is not equivalent') # Set the frame values. This checks frame equivalence and also clears # the cache to ensure that the object is not in an inconsistent state. self._sky_coord_frame[item] = value._sky_coord_frame def insert(self, obj, values, axis=0): """ Insert coordinate values before the given indices in the object and return a new Frame object. The values to be inserted must conform to the rules for in-place setting of ``SkyCoord`` objects. The API signature matches the ``np.insert`` API, but is more limited. The specification of insert index ``obj`` must be a single integer, and the ``axis`` must be ``0`` for simple insertion before the index. Parameters ---------- obj : int Integer index before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of quantity, ``values`` is converted to the matching type. axis : int, optional Axis along which to insert ``values``. Default is 0, which is the only allowed value and will insert a row. Returns ------- out : `~astropy.coordinates.SkyCoord` instance New coordinate object with inserted value(s) """ # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and # input index is in bounds. try: idx0 = operator.index(obj) except TypeError: raise TypeError('obj arg must be an integer') if axis != 0: raise ValueError('axis must be 0') if not self.shape: raise TypeError('cannot insert into scalar {} object' .format(self.__class__.__name__)) if abs(idx0) > len(self): raise IndexError('index {} is out of bounds for axis 0 with size {}' .format(idx0, len(self))) # Turn negative index into positive if idx0 < 0: idx0 = len(self) + idx0 n_values = len(values) if values.shape else 1 # Finally make the new object with the correct length and set values for the # three sections, before insert, the insert, and after the insert. out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name) # Set the output values. This is where validation of `values` takes place to ensure # that it can indeed be inserted. out[:idx0] = self[:idx0] out[idx0:idx0 + n_values] = values out[idx0 + n_values:] = self[idx0:] return out def is_transformable_to(self, new_frame): """ Determines if this coordinate frame can be transformed to another given frame. Parameters ---------- new_frame : frame class, frame object, or str The proposed frame to transform into. 
Returns ------- transformable : bool or str `True` if this can be transformed to ``new_frame``, `False` if not, or the string 'same' if ``new_frame`` is the same system as this object but no transformation is defined. Notes ----- A return value of 'same' means the transformation will work, but it will just give back a copy of this object. The intended usage is:: if coord.is_transformable_to(some_unknown_frame): coord2 = coord.transform_to(some_unknown_frame) This will work even if ``some_unknown_frame`` turns out to be the same frame class as ``coord``. This is intended for cases where the frame is the same regardless of the frame attributes (e.g. ICRS), but be aware that it *might* also indicate that someone forgot to define the transformation between two objects of the same frame class but with different attributes. """ # TODO! like matplotlib, do string overrides for modified methods new_frame = (_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame) return self.frame.is_transformable_to(new_frame) def transform_to(self, frame, merge_attributes=True): """Transform this coordinate to a new frame. The precise frame transformed to depends on ``merge_attributes``. If `False`, the destination frame is used exactly as passed in. But this is often not quite what one wants. E.g., suppose one wants to transform an ICRS coordinate that has an obstime attribute to FK4; in this case, one likely would want to use this information. Thus, the default for ``merge_attributes`` is `True`, in which the precedence is as follows: (1) explicitly set (i.e., non-default) values in the destination frame; (2) explicitly set values in the source; (3) default value in the destination frame. Note that in either case, any explicitly set attributes on the source `SkyCoord` that are not part of the destination frame's definition are kept (stored on the resulting `SkyCoord`), and thus one can round-trip (e.g., from FK4 to ICRS to FK4 without losing obstime). Parameters ---------- frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance The frame to transform this coordinate into. If a `SkyCoord`, the underlying frame is extracted, and all other information ignored. merge_attributes : bool, optional Whether the default attributes in the destination frame are allowed to be overridden by explicitly set attributes in the source (see note above; default: `True`). Returns ------- coord : `SkyCoord` A new object with this coordinate represented in the `frame` frame. Raises ------ ValueError If there is no possible transformation route. """ from astropy.coordinates.errors import ConvertError frame_kwargs = {} # Frame name (string) or frame class? Coerce into an instance. try: frame = _get_frame_class(frame)() except Exception: pass if isinstance(frame, SkyCoord): frame = frame.frame # Change to underlying coord frame instance if isinstance(frame, BaseCoordinateFrame): new_frame_cls = frame.__class__ # Get frame attributes, allowing defaults to be overridden by # explicitly set attributes of the source if ``merge_attributes``. 
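            # Precedence, per the docstring above: (1) non-default attributes
            # set on the destination frame; (2) explicitly set attributes on
            # self; (3) the destination frame's defaults.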
for attr in frame_transform_graph.frame_attributes: self_val = getattr(self, attr, None) frame_val = getattr(frame, attr, None) if (frame_val is not None and not (merge_attributes and frame.is_frame_attr_default(attr))): frame_kwargs[attr] = frame_val elif (self_val is not None and not self.is_frame_attr_default(attr)): frame_kwargs[attr] = self_val elif frame_val is not None: frame_kwargs[attr] = frame_val else: raise ValueError('Transform `frame` must be a frame name, class, or instance') # Get the composite transform to the new frame trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls) if trans is None: raise ConvertError('Cannot transform from {} to {}' .format(self.frame.__class__, new_frame_cls)) # Make a generic frame which will accept all the frame kwargs that # are provided and allow for transforming through intermediate frames # which may require one or more of those kwargs. generic_frame = GenericFrame(frame_kwargs) # Do the transformation, returning a coordinate frame of the desired # final type (not generic). new_coord = trans(self.frame, generic_frame) # Finally make the new SkyCoord object from the `new_coord` and # remaining frame_kwargs that are not frame_attributes in `new_coord`. for attr in (set(new_coord.get_frame_attr_names()) & set(frame_kwargs.keys())): frame_kwargs.pop(attr) # Always remove the origin frame attribute, as that attribute only makes # sense with a SkyOffsetFrame (in which case it will be stored on the frame). # See gh-11277. # TODO: Should it be a property of the frame attribute that it can # or cannot be stored on a SkyCoord? frame_kwargs.pop('origin', None) return self.__class__(new_coord, **frame_kwargs) def apply_space_motion(self, new_obstime=None, dt=None): """ Compute the position of the source represented by this coordinate object to a new time using the velocities stored in this object and assuming linear space motion (including relativistic corrections). This is sometimes referred to as an "epoch transformation." The initial time before the evolution is taken from the ``obstime`` attribute of this coordinate. Note that this method currently does not support evolving coordinates where the *frame* has an ``obstime`` frame attribute, so the ``obstime`` is only used for storing the before and after times, not actually as an attribute of the frame. Alternatively, if ``dt`` is given, an ``obstime`` need not be provided at all. Parameters ---------- new_obstime : `~astropy.time.Time`, optional The time at which to evolve the position to. Requires that the ``obstime`` attribute be present on this frame. dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional An amount of time to evolve the position of the source. Cannot be given at the same time as ``new_obstime``. Returns ------- new_coord : `SkyCoord` A new coordinate object with the evolved location of this coordinate at the new time. ``obstime`` will be set on this object to the new time only if ``self`` also has ``obstime``. """ if (new_obstime is None and dt is None or new_obstime is not None and dt is not None): raise ValueError("You must specify one of `new_obstime` or `dt`, " "but not both.") # Validate that we have velocity info if 's' not in self.frame.data.differentials: raise ValueError('SkyCoord requires velocity data to evolve the ' 'position.') if 'obstime' in self.frame.frame_attributes: raise NotImplementedError("Updating the coordinates in a frame " "with explicit time dependence is " "currently not supported. 
If you would " "like this functionality, please open an " "issue on github:\n" "https://github.com/astropy/astropy") if new_obstime is not None and self.obstime is None: # If no obstime is already on this object, raise an error if a new # obstime is passed: we need to know the time / epoch at which the # the position / velocity were measured initially raise ValueError('This object has no associated `obstime`. ' 'apply_space_motion() must receive a time ' 'difference, `dt`, and not a new obstime.') # Compute t1 and t2, the times used in the starpm call, which *only* # uses them to compute a delta-time t1 = self.obstime if dt is None: # self.obstime is not None and new_obstime is not None b/c of above # checks t2 = new_obstime else: # new_obstime is definitely None b/c of the above checks if t1 is None: # MAGIC NUMBER: if the current SkyCoord object has no obstime, # assume J2000 to do the dt offset. This is not actually used # for anything except a delta-t in starpm, so it's OK that it's # not necessarily the "real" obstime t1 = Time('J2000') new_obstime = None # we don't actually know the initial obstime t2 = t1 + dt else: t2 = t1 + dt new_obstime = t2 # starpm wants tdb time t1 = t1.tdb t2 = t2.tdb # proper motion in RA should not include the cos(dec) term, see the # erfa function eraStarpv, comment (4). So we convert to the regular # spherical differentials. icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential) icrsvel = icrsrep.differentials['s'] parallax_zero = False try: plx = icrsrep.distance.to_value(u.arcsecond, u.parallax()) except u.UnitConversionError: # No distance: set to 0 by convention plx = 0. parallax_zero = True try: rv = icrsvel.d_distance.to_value(u.km/u.s) except u.UnitConversionError: # No RV rv = 0. starpm = erfa.pmsafe(icrsrep.lon.radian, icrsrep.lat.radian, icrsvel.d_lon.to_value(u.radian/u.yr), icrsvel.d_lat.to_value(u.radian/u.yr), plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2) if parallax_zero: new_distance = None else: new_distance = Distance(parallax=starpm[4] << u.arcsec) icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False), dec=u.Quantity(starpm[1], u.radian, copy=False), pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False), pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False), distance=new_distance, radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False), differential_type=SphericalDifferential) # Update the obstime of the returned SkyCoord, and need to carry along # the frame attributes frattrs = {attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names} frattrs['obstime'] = new_obstime result = self.__class__(icrs2, **frattrs).transform_to(self.frame) # Without this the output might not have the right differential type. # Not sure if this fixes the problem or just hides it. See #11932 result.differential_type = self.differential_type return result def _is_name(self, string): """ Returns whether a string is one of the aliases for the frame. """ return (self.frame.name == string or (isinstance(self.frame.name, list) and string in self.frame.name)) def __getattr__(self, attr): """ Overrides getattr to return coordinates that this can be transformed to, based on the alias attr in the primary transform graph. """ if '_sky_coord_frame' in self.__dict__: if self._is_name(attr): return self # Should this be a deepcopy of self? # Anything in the set of all possible frame_attr_names is handled # here. If the attr is relevant for the current frame then delegate # to self.frame otherwise get it from self._<attr>. 
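            # For example, an ICRS-based SkyCoord created with ``obstime=...``
            # keeps that value as the private ``self._obstime`` and returns it
            # from here, since ICRS itself has no ``obstime`` attribute.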
if attr in frame_transform_graph.frame_attributes: if attr in self.frame.get_frame_attr_names(): return getattr(self.frame, attr) else: return getattr(self, '_' + attr, None) # Some attributes might not fall in the above category but still # are available through self._sky_coord_frame. if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr): return getattr(self._sky_coord_frame, attr) # Try to interpret as a new frame for transforming. frame_cls = frame_transform_graph.lookup_name(attr) if frame_cls is not None and self.frame.is_transformable_to(frame_cls): return self.transform_to(attr) # Fail raise AttributeError("'{}' object has no attribute '{}'" .format(self.__class__.__name__, attr)) def __setattr__(self, attr, val): # This is to make anything available through __getattr__ immutable if '_sky_coord_frame' in self.__dict__: if self._is_name(attr): raise AttributeError(f"'{attr}' is immutable") if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr): setattr(self._sky_coord_frame, attr, val) return frame_cls = frame_transform_graph.lookup_name(attr) if frame_cls is not None and self.frame.is_transformable_to(frame_cls): raise AttributeError(f"'{attr}' is immutable") if attr in frame_transform_graph.frame_attributes: # All possible frame attributes can be set, but only via a private # variable. See __getattr__ above. super().__setattr__('_' + attr, val) # Validate it frame_transform_graph.frame_attributes[attr].__get__(self) # And add to set of extra attributes self._extra_frameattr_names |= {attr} else: # Otherwise, do the standard Python attribute setting super().__setattr__(attr, val) def __delattr__(self, attr): # mirror __setattr__ above if '_sky_coord_frame' in self.__dict__: if self._is_name(attr): raise AttributeError(f"'{attr}' is immutable") if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr): delattr(self._sky_coord_frame, attr) return frame_cls = frame_transform_graph.lookup_name(attr) if frame_cls is not None and self.frame.is_transformable_to(frame_cls): raise AttributeError(f"'{attr}' is immutable") if attr in frame_transform_graph.frame_attributes: # All possible frame attributes can be deleted, but need to remove # the corresponding private variable. See __getattr__ above. super().__delattr__('_' + attr) # Also remove it from the set of extra attributes self._extra_frameattr_names -= {attr} else: # Otherwise, do the standard Python attribute setting super().__delattr__(attr) @override__dir__ def __dir__(self): """ Override the builtin `dir` behavior to include: - Transforms available by aliases - Attribute / methods of the underlying self.frame object """ # determine the aliases that this can be transformed to. dir_values = set() for name in frame_transform_graph.get_names(): frame_cls = frame_transform_graph.lookup_name(name) if self.frame.is_transformable_to(frame_cls): dir_values.add(name) # Add public attributes of self.frame dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_'))) # Add all possible frame attributes dir_values.update(frame_transform_graph.frame_attributes.keys()) return dir_values def __repr__(self): clsnm = self.__class__.__name__ coonm = self.frame.__class__.__name__ frameattrs = self.frame._frame_attrs_repr() if frameattrs: frameattrs = ': ' + frameattrs data = self.frame._data_repr() if data: data = ': ' + data return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals()) def to_string(self, style='decimal', **kwargs): """ A string representation of the coordinates. 
        The default styles definitions are::

          'decimal': 'lat': {'decimal': True, 'unit': "deg"}
                     'lon': {'decimal': True, 'unit': "deg"}
          'dms': 'lat': {'unit': "deg"}
                 'lon': {'unit': "deg"}
          'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
                    'lon': {'pad': True, 'unit': "hour"}

        See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are both
        :class:`~astropy.coordinates.Angle` instances). Keyword arguments
        have precedence over the style defaults and are passed to
        :meth:`~astropy.coordinates.Angle.to_string`.

        Parameters
        ----------
        style : {'hmsdms', 'dms', 'decimal'}
            The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is `decimal`.
        **kwargs
            Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
        """
        sph_coord = self.frame.represent_as(SphericalRepresentation)

        styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
                             'latargs': {'unit': u.degree, 'pad': True,
                                         'alwayssign': True}},
                  'dms': {'lonargs': {'unit': u.degree},
                          'latargs': {'unit': u.degree}},
                  'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
                              'latargs': {'unit': u.degree, 'decimal': True}}
                  }

        lonargs = {}
        latargs = {}

        if style in styles:
            lonargs.update(styles[style]['lonargs'])
            latargs.update(styles[style]['latargs'])
        else:
            raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")

        lonargs.update(kwargs)
        latargs.update(kwargs)

        if np.isscalar(sph_coord.lon.value):
            coord_string = (sph_coord.lon.to_string(**lonargs)
                            + " " + sph_coord.lat.to_string(**latargs))
        else:
            coord_string = []
            for lonangle, latangle in zip(sph_coord.lon.ravel(),
                                          sph_coord.lat.ravel()):
                coord_string += [(lonangle.to_string(**lonargs)
                                  + " " + latangle.to_string(**latargs))]
            if len(sph_coord.shape) > 1:
                coord_string = np.array(coord_string).reshape(sph_coord.shape)

        return coord_string

    def to_table(self):
        """
        Convert this |SkyCoord| to a |QTable|.

        Any attributes that have the same length as the |SkyCoord| will be
        converted to columns of the |QTable|. All other attributes will be
        recorded as metadata.

        Returns
        -------
        `~astropy.table.QTable`
            A |QTable| containing the data of this |SkyCoord|.

        Examples
        --------
        >>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
        ...               obstime=Time([2000, 2010], format='jyear'))
        >>> t = sc.to_table()
        >>> t
        <QTable length=2>
           ra     dec   obstime
          deg     deg
        float64 float64   Time
        ------- ------- -------
           40.0     0.0  2000.0
           70.0   -20.0  2010.0
        >>> t.meta
        {'representation_type': 'spherical', 'frame': 'icrs'}
        """
        self_as_dict = self.info._represent_as_dict()
        tabledata = {}
        metadata = {}
        # Record attributes that have the same length as self as columns in
        # the table, and the other attributes as table metadata.  This matches
        # table.serialize._represent_mixin_as_column().
        for key, value in self_as_dict.items():
            if getattr(value, 'shape', ())[:1] == (len(self),):
                tabledata[key] = value
            else:
                metadata[key] = value
        return QTable(tabledata, meta=metadata)

    def is_equivalent_frame(self, other):
        """
        Checks if this object's frame is the same as that of the ``other``
        object.

        To be the same frame, two objects must be the same frame class and
        have the same frame attributes.  For two `SkyCoord` objects, *all* of
        the frame attributes have to match, not just those relevant for the
        object's frame.

        Parameters
        ----------
        other : SkyCoord or BaseCoordinateFrame
            The other object to check.

        Returns
        -------
        isequiv : bool
            True if the frames are the same, False if not.
Raises ------ TypeError If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass. """ if isinstance(other, BaseCoordinateFrame): return self.frame.is_equivalent_frame(other) elif isinstance(other, SkyCoord): if other.frame.name != self.frame.name: return False for fattrnm in frame_transform_graph.frame_attributes: if not BaseCoordinateFrame._frameattr_equiv(getattr(self, fattrnm), getattr(other, fattrnm)): return False return True else: # not a BaseCoordinateFrame nor a SkyCoord object raise TypeError("Tried to do is_equivalent_frame on something that " "isn't frame-like") # High-level convenience methods def separation(self, other): """ Computes on-sky separation between this coordinate and another. .. note:: If the ``other`` coordinate object is in a different frame, it is first transformed to the frame of this object. This can lead to unintuitive behavior if not accounted for. Particularly of note is that ``self.separation(other)`` and ``other.separation(self)`` may not give the same answer in this case. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinate to get the separation to. Returns ------- sep : `~astropy.coordinates.Angle` The on-sky separation between this and the ``other`` coordinate. Notes ----- The separation is calculated using the Vincenty formula, which is stable at all locations, including poles and antipodes [1]_. .. [1] https://en.wikipedia.org/wiki/Great-circle_distance """ from . import Angle from .angle_utilities import angular_separation if not self.is_equivalent_frame(other): try: kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {} other = other.transform_to(self, **kwargs) except TypeError: raise TypeError('Can only get separation to another SkyCoord ' 'or a coordinate frame with data') lon1 = self.spherical.lon lat1 = self.spherical.lat lon2 = other.spherical.lon lat2 = other.spherical.lat # Get the separation as a Quantity, convert to Angle in degrees sep = angular_separation(lon1, lat1, lon2, lat2) return Angle(sep, unit=u.degree) def separation_3d(self, other): """ Computes three dimensional separation between this coordinate and another. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinate to get the separation to. Returns ------- sep : `~astropy.coordinates.Distance` The real-space distance between these two coordinates. Raises ------ ValueError If this or the other coordinate do not have distances. 
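
        Examples
        --------
        A sketch with illustrative values (both coordinates need distances)::

            >>> from astropy.coordinates import SkyCoord
            >>> import astropy.units as u
            >>> c1 = SkyCoord(10*u.deg, 20*u.deg, distance=10*u.pc)
            >>> c2 = SkyCoord(12*u.deg, 22*u.deg, distance=12*u.pc)
            >>> sep = c1.separation_3d(c2)  # a Distance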
""" if not self.is_equivalent_frame(other): try: kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {} other = other.transform_to(self, **kwargs) except TypeError: raise TypeError('Can only get separation to another SkyCoord ' 'or a coordinate frame with data') if issubclass(self.data.__class__, UnitSphericalRepresentation): raise ValueError('This object does not have a distance; cannot ' 'compute 3d separation.') if issubclass(other.data.__class__, UnitSphericalRepresentation): raise ValueError('The other object does not have a distance; ' 'cannot compute 3d separation.') c1 = self.cartesian.without_differentials() c2 = other.cartesian.without_differentials() return Distance((c1 - c2).norm()) def spherical_offsets_to(self, tocoord): r""" Computes angular offsets to go *from* this coordinate *to* another. Parameters ---------- tocoord : `~astropy.coordinates.BaseCoordinateFrame` The coordinate to find the offset to. Returns ------- lon_offset : `~astropy.coordinates.Angle` The angular offset in the longitude direction. The definition of "longitude" depends on this coordinate's frame (e.g., RA for equatorial coordinates). lat_offset : `~astropy.coordinates.Angle` The angular offset in the latitude direction. The definition of "latitude" depends on this coordinate's frame (e.g., Dec for equatorial coordinates). Raises ------ ValueError If the ``tocoord`` is not in the same frame as this one. This is different from the behavior of the `separation`/`separation_3d` methods because the offset components depend critically on the specific choice of frame. Notes ----- This uses the sky offset frame machinery, and hence will produce a new sky offset frame if one does not already exist for this object's frame class. See Also -------- separation : for the *total* angular offset (not broken out into components). position_angle : for the direction of the offset. """ if not self.is_equivalent_frame(tocoord): raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!') aframe = self.skyoffset_frame() acoord = tocoord.transform_to(aframe) dlon = acoord.spherical.lon.view(Angle) dlat = acoord.spherical.lat.view(Angle) return dlon, dlat def spherical_offsets_by(self, d_lon, d_lat): """ Computes the coordinate that is a specified pair of angular offsets away from this coordinate. Parameters ---------- d_lon : angle-like The angular offset in the longitude direction. The definition of "longitude" depends on this coordinate's frame (e.g., RA for equatorial coordinates). d_lat : angle-like The angular offset in the latitude direction. The definition of "latitude" depends on this coordinate's frame (e.g., Dec for equatorial coordinates). Returns ------- newcoord : `~astropy.coordinates.SkyCoord` The coordinates for the location that corresponds to offsetting by ``d_lat`` in the latitude direction and ``d_lon`` in the longitude direction. Notes ----- This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the transformation. For a more complete set of transform offsets, use `~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually. This specific method can be reproduced by doing ``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``. 
See Also -------- spherical_offsets_to : compute the angular offsets to another coordinate directional_offset_by : offset a coordinate by an angle in a direction """ return self.__class__( SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)) def directional_offset_by(self, position_angle, separation): """ Computes coordinates at the given offset from this coordinate. Parameters ---------- position_angle : `~astropy.coordinates.Angle` position_angle of offset separation : `~astropy.coordinates.Angle` offset angular separation Returns ------- newpoints : `~astropy.coordinates.SkyCoord` The coordinates for the location that corresponds to offsetting by the given `position_angle` and `separation`. Notes ----- Returned SkyCoord frame retains only the frame attributes that are for the resulting frame type. (e.g. if the input frame is `~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but an ``obstime`` will not.) For a more complete set of transform offsets, use `~astropy.wcs.WCS`. `~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to create a spherical frame with (lat=0, lon=0) at a reference point, approximating an xy cartesian system for small offsets. This method is distinct in that it is accurate on the sphere. See Also -------- position_angle : inverse operation for the ``position_angle`` component separation : inverse operation for the ``separation`` component """ from . import angle_utilities slat = self.represent_as(UnitSphericalRepresentation).lat slon = self.represent_as(UnitSphericalRepresentation).lon newlon, newlat = angle_utilities.offset_by( lon=slon, lat=slat, posang=position_angle, distance=separation) return SkyCoord(newlon, newlat, frame=self.frame) def match_to_catalog_sky(self, catalogcoord, nthneighbor=1): """ Finds the nearest on-sky matches of this coordinate in a set of catalog coordinates. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each of this object's coordinates. Shape matches this object. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. Unless both this and ``catalogcoord`` have associated distances, this quantity assumes that all sources are at a distance of 1 (dimensionless). Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. 
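
        A typical call pattern looks like this (a sketch; ``catalog`` stands
        for an array-valued `SkyCoord`)::

            >>> idx, sep2d, dist3d = coords.match_to_catalog_sky(catalog)  # doctest: +SKIP
            >>> matches = catalog[idx]  # doctest: +SKIP
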
See Also -------- astropy.coordinates.match_coordinates_sky SkyCoord.match_to_catalog_3d """ from .matching import match_coordinates_sky if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame)) and catalogcoord.has_data): raise TypeError('Can only get separation to another SkyCoord or a ' 'coordinate frame with data') res = match_coordinates_sky(self, catalogcoord, nthneighbor=nthneighbor, storekdtree='_kdtree_sky') return res def match_to_catalog_3d(self, catalogcoord, nthneighbor=1): """ Finds the nearest 3-dimensional matches of this coordinate to a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in this object or the ``catalogcoord`` object. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each of this object's coordinates. Shape matches this object. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. See Also -------- astropy.coordinates.match_coordinates_3d SkyCoord.match_to_catalog_sky """ from .matching import match_coordinates_3d if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame)) and catalogcoord.has_data): raise TypeError('Can only get separation to another SkyCoord or a ' 'coordinate frame with data') res = match_coordinates_3d(self, catalogcoord, nthneighbor=nthneighbor, storekdtree='_kdtree_3d') return res def search_around_sky(self, searcharoundcoords, seplimit): """ Searches for all coordinates in this object around a supplied set of points within a given on-sky separation. This is intended for use on `~astropy.coordinates.SkyCoord` objects with coordinate arrays, rather than a scalar coordinate. For a scalar coordinate, it is better to use `~astropy.coordinates.SkyCoord.separation`. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- searcharoundcoords : coordinate-like The coordinates to search around to try to find matching points in this `SkyCoord`. This should be an object with array coordinates, not a scalar coordinate object. seplimit : `~astropy.units.Quantity` ['angle'] The on-sky separation to search within. Returns ------- idxsearcharound : int array Indices into ``searcharoundcoords`` that match the corresponding elements of ``idxself``. Shape matches ``idxself``. 
idxself : int array Indices into ``self`` that match the corresponding elements of ``idxsearcharound``. Shape matches ``idxsearcharound``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. See Also -------- astropy.coordinates.search_around_sky SkyCoord.search_around_3d """ from .matching import search_around_sky return search_around_sky(searcharoundcoords, self, seplimit, storekdtree='_kdtree_sky') def search_around_3d(self, searcharoundcoords, distlimit): """ Searches for all coordinates in this object around a supplied set of points within a given 3D radius. This is intended for use on `~astropy.coordinates.SkyCoord` objects with coordinate arrays, rather than a scalar coordinate. For a scalar coordinate, it is better to use `~astropy.coordinates.SkyCoord.separation_3d`. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinates to search around to try to find matching points in this `SkyCoord`. This should be an object with array coordinates, not a scalar coordinate object. distlimit : `~astropy.units.Quantity` ['length'] The physical radius to search within. Returns ------- idxsearcharound : int array Indices into ``searcharoundcoords`` that match the corresponding elements of ``idxself``. Shape matches ``idxself``. idxself : int array Indices into ``self`` that match the corresponding elements of ``idxsearcharound``. Shape matches ``idxsearcharound``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. See Also -------- astropy.coordinates.search_around_3d SkyCoord.search_around_sky """ from .matching import search_around_3d return search_around_3d(searcharoundcoords, self, distlimit, storekdtree='_kdtree_3d') def position_angle(self, other): """ Computes the on-sky position angle (East of North) between this `SkyCoord` and another. Parameters ---------- other : `SkyCoord` The other coordinate to compute the position angle to. It is treated as the "head" of the vector of the position angle. Returns ------- pa : `~astropy.coordinates.Angle` The (positive) position angle of the vector pointing from ``self`` to ``other``. If either ``self`` or ``other`` contain arrays, this will be an array following the appropriate `numpy` broadcasting rules. 
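        Notes
        -----
        For reference, the quantity computed here is the standard
        spherical-trigonometry position angle (the actual computation is done
        by ``angle_utilities.position_angle``)::

            PA = arctan2(sin(dlon), cos(lat1)*tan(lat2) - sin(lat1)*cos(dlon))

        where ``dlon = lon2 - lon1``, with ``(lon1, lat1)`` taken from
        ``self`` and ``(lon2, lat2)`` from ``other``.
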
        Examples
        --------
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
        >>> c2 = SkyCoord(1*u.deg, 0*u.deg)
        >>> c1.position_angle(c2).degree
        90.0
        >>> c3 = SkyCoord(1*u.deg, 1*u.deg)
        >>> c1.position_angle(c3).degree  # doctest: +FLOAT_CMP
        44.995636455344844
        """
        from . import angle_utilities

        if not self.is_equivalent_frame(other):
            try:
                other = other.transform_to(self, merge_attributes=False)
            except TypeError:
                raise TypeError('Can only get position_angle to another '
                                'SkyCoord or a coordinate frame with data')

        slat = self.represent_as(UnitSphericalRepresentation).lat
        slon = self.represent_as(UnitSphericalRepresentation).lon
        olat = other.represent_as(UnitSphericalRepresentation).lat
        olon = other.represent_as(UnitSphericalRepresentation).lon

        return angle_utilities.position_angle(slon, slat, olon, olat)

    def skyoffset_frame(self, rotation=None):
        """
        Returns the sky offset frame with this `SkyCoord` at the origin.

        Parameters
        ----------
        rotation : angle-like
            The final rotation of the frame about the ``origin``. The sign of
            the rotation is the left-hand rule. That is, an object at a
            particular position angle in the un-rotated system will be sent to
            the positive latitude (z) direction in the final frame.

        Returns
        -------
        astrframe : `~astropy.coordinates.SkyOffsetFrame`
            A sky offset frame of the same type as this `SkyCoord` (e.g., if
            this object has an ICRS coordinate, the resulting frame is
            SkyOffsetICRS, with the origin set to this object)
        """
        return SkyOffsetFrame(origin=self, rotation=rotation)

    def get_constellation(self, short_name=False, constellation_list='iau'):
        """
        Determines the constellation(s) of the coordinates this `SkyCoord`
        contains.

        Parameters
        ----------
        short_name : bool
            If True, the returned names are the IAU-sanctioned abbreviated
            names. Otherwise, full names for the constellations are used.
        constellation_list : str
            The set of constellations to use. Currently only ``'iau'`` is
            supported, meaning the 88 "modern" constellations endorsed by the
            IAU.

        Returns
        -------
        constellation : str or string array
            If this is a scalar coordinate, returns the name of the
            constellation. If it is an array `SkyCoord`, it returns an array
            of names.

        Notes
        -----
        To determine which constellation a point on the sky is in, this first
        precesses to B1875, and then uses the Delporte boundaries of the 88
        modern constellations, as tabulated by
        `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.

        See Also
        --------
        astropy.coordinates.get_constellation
        """
        from .funcs import get_constellation

        # because of issue #7028, the conversion to a PrecessedGeocentric
        # system fails in some cases. Work around is to drop the velocities.
        # they are not needed here since only position information is used
        extra_frameattrs = {nm: getattr(self, nm)
                            for nm in self._extra_frameattr_names}
        novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
                         **extra_frameattrs)
        return get_constellation(novel, short_name, constellation_list)

        # the simpler version below can be used when gh-issue #7028 is resolved
        # return get_constellation(self, short_name, constellation_list)

    # WCS pixel to/from sky conversions
    def to_pixel(self, wcs, origin=0, mode='all'):
        """
        Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
        object.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
        origin : int
            Whether to return 0 or 1-based pixel coordinates.
        mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or only the core WCS transformation (``'wcs'``).
        Returns
        -------
        xp, yp : `numpy.ndarray`
            The pixel coordinates

        See Also
        --------
        astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
        """
        from astropy.wcs.utils import skycoord_to_pixel
        return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)

    @classmethod
    def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
        """
        Create a new `SkyCoord` from pixel coordinates using an
        `~astropy.wcs.WCS` object.

        Parameters
        ----------
        xp, yp : float or ndarray
            The coordinates to convert.
        wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
        origin : int
            Whether to return 0 or 1-based pixel coordinates.
        mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or only the core WCS transformation (``'wcs'``).

        Returns
        -------
        coord : `~astropy.coordinates.SkyCoord`
            A new object with sky coordinates corresponding to the input ``xp``
            and ``yp``.

        See Also
        --------
        to_pixel : to do the inverse operation
        astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
        """
        from astropy.wcs.utils import pixel_to_skycoord
        return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode,
                                 cls=cls)

    def contained_by(self, wcs, image=None, **kwargs):
        """
        Determines if the SkyCoord is contained in the given wcs footprint.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS footprint to check the coordinate against.
        image : array
            Optional. The image associated with the wcs object that the
            coordinate is being checked against. If not given, the naxis
            keywords will be used to determine if the coordinate falls within
            the wcs footprint.
        **kwargs
            Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`

        Returns
        -------
        response : bool
            True means the WCS footprint contains the coordinate, False means
            it does not.
        """

        if image is not None:
            ymax, xmax = image.shape
        else:
            xmax, ymax = wcs._naxis

        import warnings
        with warnings.catch_warnings():
            # Suppress warnings since they just mean we didn't find the
            # coordinate
            warnings.simplefilter("ignore")
            try:
                x, y = self.to_pixel(wcs, **kwargs)
            except Exception:
                return False

        return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)

    def radial_velocity_correction(self, kind='barycentric', obstime=None,
                                   location=None):
        """
        Compute the correction required to convert a radial velocity at a
        given time and place on the Earth's surface to a barycentric or
        heliocentric velocity.

        Parameters
        ----------
        kind : str
            The kind of velocity correction. Must be 'barycentric' or
            'heliocentric'.
        obstime : `~astropy.time.Time` or None, optional
            The time at which to compute the correction. If `None`, the
            ``obstime`` frame attribute on the `SkyCoord` will be used.
        location : `~astropy.coordinates.EarthLocation` or None, optional
            The observer location at which to compute the correction. If
            `None`, the ``location`` frame attribute on the passed-in
            ``obstime`` will be used, and if that is None, the ``location``
            frame attribute on the `SkyCoord` will be used.

        Raises
        ------
        ValueError
            If either ``obstime`` or ``location`` are passed in (not ``None``)
            when the frame attribute is already set on this `SkyCoord`.
        TypeError
            If ``obstime`` or ``location`` aren't provided, either as
            arguments or as frame attributes.

        Returns
        -------
        vcorr : `~astropy.units.Quantity` ['speed']
            The correction with a positive sign. I.e., *add* this to an
            observed radial velocity to get the barycentric (or heliocentric)
            velocity. If m/s precision or better is needed, see the notes
            below.
        Notes
        -----
        The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time
        dilation). Use barycentric corrections if m/s precision is required.

        The algorithm here is sufficient to perform corrections at the mm/s
        level, but care is needed in application. The barycentric correction
        returned uses the optical approximation v = z * c. Strictly speaking,
        the barycentric correction is multiplicative and should be applied
        as::

            >>> from astropy.time import Time
            >>> from astropy.coordinates import SkyCoord, EarthLocation
            >>> from astropy.constants import c
            >>> t = Time(56370.5, format='mjd', scale='utc')
            >>> loc = EarthLocation('149d33m00.5s', '-30d18m46.385s', 236.87*u.m)
            >>> sc = SkyCoord(1*u.deg, 2*u.deg)
            >>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)  # doctest: +REMOTE_DATA
            >>> rv = rv + vcorr + rv * vcorr / c  # doctest: +SKIP

        Also note that this method returns the correction velocity in the
        so-called *optical convention*::

            >>> vcorr = zb * c  # doctest: +SKIP

        where ``zb`` is the barycentric correction redshift as defined in
        section 3 of Wright & Eastman (2014). The application formula given
        above follows from their equation (11) under the assumption that the
        radial velocity ``rv`` has also been defined using the same optical
        convention. Note, this can be regarded as a matter of velocity
        definition and does not by itself imply any loss of accuracy, provided
        sufficient care has been taken during interpretation of the results.
        If you need the barycentric correction expressed as the full
        relativistic velocity (e.g., to provide it as input to other software
        that performs the application), the following recipe can be used::

            >>> zb = vcorr / c  # doctest: +REMOTE_DATA
            >>> zb_plus_one_squared = (zb + 1) ** 2  # doctest: +REMOTE_DATA
            >>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1)  # doctest: +REMOTE_DATA

        or alternatively using just equivalencies::

            >>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz))  # doctest: +REMOTE_DATA

        See also `~astropy.units.equivalencies.doppler_optical`,
        `~astropy.units.equivalencies.doppler_radio`, and
        `~astropy.units.equivalencies.doppler_relativistic` for more
        information on the velocity conventions.

        The default is for this method to use the builtin ephemeris for
        computing the sun and earth location. Other ephemerides can be chosen
        by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
        either directly or via a ``with`` statement. For example, to use the
        JPL ephemeris, do::

            >>> from astropy.coordinates import solar_system_ephemeris
            >>> sc = SkyCoord(1*u.deg, 2*u.deg)
            >>> with solar_system_ephemeris.set('jpl'):  # doctest: +REMOTE_DATA
            ...     rv += sc.radial_velocity_correction(obstime=t, location=loc)  # doctest: +SKIP
        """
        # has to be here to prevent circular imports
        from .solar_system import get_body_barycentric_posvel

        # location validation
        timeloc = getattr(obstime, 'location', None)
        if location is None:
            if self.location is not None:
                location = self.location
                if timeloc is not None:
                    raise ValueError('`location` cannot be in both the '
                                     'passed-in `obstime` and this `SkyCoord` '
                                     'because it is ambiguous which is meant '
                                     'for the radial_velocity_correction.')
            elif timeloc is not None:
                location = timeloc
            else:
                raise TypeError('Must provide a `location` to '
                                'radial_velocity_correction, either as a '
                                'SkyCoord frame attribute, as an attribute on '
                                'the passed in `obstime`, or in the method '
                                'call.')

        elif self.location is not None or timeloc is not None:
            raise ValueError('Cannot compute radial velocity correction if '
                             '`location` argument is passed in and there is '
                             'also a `location` attribute on this SkyCoord or '
                             'the passed-in `obstime`.')

        # obstime validation
        coo_at_rv_obstime = self  # assume we need no space motion for now
        if obstime is None:
            obstime = self.obstime
            if obstime is None:
                raise TypeError('Must provide an `obstime` to '
                                'radial_velocity_correction, either as a '
                                'SkyCoord frame attribute or in the method '
                                'call.')
        elif self.obstime is not None and self.frame.data.differentials:
            # we do need space motion after all
            coo_at_rv_obstime = self.apply_space_motion(obstime)
        elif self.obstime is None:
            # warn the user if the object has differentials set
            if 's' in self.data.differentials:
                warnings.warn(
                    "SkyCoord has space motion, and therefore the specified "
                    "position of the SkyCoord may not be the same as "
                    "the `obstime` for the radial velocity measurement. "
                    "This may affect the rv correction at the order of km/s "
                    "for very high proper motion sources. If you wish to "
                    "apply space motion of the SkyCoord to correct for this, "
                    "the `obstime` attribute of the SkyCoord must be set",
                    AstropyUserWarning
                )

        pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
        if kind == 'barycentric':
            v_origin_to_earth = v_earth
        elif kind == 'heliocentric':
            v_sun = get_body_barycentric_posvel('sun', obstime)[1]
            v_origin_to_earth = v_earth - v_sun
        else:
            raise ValueError("`kind` argument to radial_velocity_correction must "
                             "be 'barycentric' or 'heliocentric', but got "
                             "'{}'".format(kind))

        gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # transforming to GCRS is not the correct thing to do here, since we
        # don't want to include aberration (or light deflection); instead,
        # only apply parallax if necessary
        icrs_cart = coo_at_rv_obstime.icrs.cartesian
        icrs_cart_novel = icrs_cart.without_differentials()
        if self.data.__class__ is UnitSphericalRepresentation:
            targcart = icrs_cart_novel
        else:
            # skycoord has distances so apply parallax
            obs_icrs_cart = pos_earth + gcrs_p
            targcart = icrs_cart_novel - obs_icrs_cart
            targcart /= targcart.norm()

        if kind == 'barycentric':
            beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
            gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
            gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
            # neglecting Shapiro delay and effects of the star's own motion
            zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr/speed_of_light)

            # try and get terms corresponding to stellar motion.
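            # (When full 3D space motion is available, this multiplies zb by
            # (1 + beta_star . rhat_epoch) / (1 + beta_star . rhat_obstime),
            # the stellar-motion factor of Wright & Eastman (2014) eq. 28; if
            # the velocity information is incomplete, the UnitConversionError
            # branch below skips it.)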
            if icrs_cart.differentials:
                try:
                    ro = self.icrs.cartesian
                    beta_star = ro.differentials['s'].to_cartesian() / speed_of_light
                    # ICRS unit vector at coordinate epoch
                    ro = ro.without_differentials()
                    ro /= ro.norm()
                    zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
                except u.UnitConversionError:
                    warnings.warn("SkyCoord contains some velocity information, but not enough to "
                                  "calculate the full space motion of the source, and so this has "
                                  "been ignored for the purposes of calculating the radial velocity "
                                  "correction. This can lead to errors on the order of metres/second.",
                                  AstropyUserWarning)

            zb = zb - 1
            return zb * speed_of_light
        else:
            # do a simpler correction ignoring time dilation and gravitational redshift
            # this is adequate since Heliocentric corrections shouldn't be used if
            # cm/s precision is required.
            return targcart.dot(v_origin_to_earth + gcrs_v)

    # Table interactions
    @classmethod
    def guess_from_table(cls, table, **coord_kwargs):
        r"""
        A convenience method to create and return a new `SkyCoord` from the
        data in an astropy Table.

        This method matches table columns that start with the case-insensitive
        names of the components of the requested frames (including
        differentials), if they are also followed by a non-alphanumeric
        character. It will also match columns that *end* with the component
        name if a non-alphanumeric character is *before* it.

        For example, the first rule means columns with names like
        ``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes
        for `~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or
        ``'radius'`` are *not*. Similarly, the second rule applied to the
        `~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
        ``'fill'`` will not.

        The definition of alphanumeric here is based on Unicode's definition
        of alphanumeric, except without ``_`` (which is normally considered
        alphanumeric). So for ASCII, this means the non-alphanumeric
        characters are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.

        Parameters
        ----------
        table : `~astropy.table.Table` or subclass
            The table to load data from.
        **coord_kwargs
            Any additional keyword arguments are passed directly to this
            class's constructor.

        Returns
        -------
        newsc : `~astropy.coordinates.SkyCoord` or subclass
            The new `SkyCoord` (or subclass) object.

        Raises
        ------
        ValueError
            If more than one match is found in the table for a component,
            unless the additional matches are also valid frame component
            names. If a "coord_kwargs" is provided for a value also found in
            the table.
        """
        _frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
        frame = _frame_cls(**_frame_kwargs)
        coord_kwargs['frame'] = coord_kwargs.get('frame', frame)

        representation_component_names = (
            set(frame.get_representation_component_names())
            .union(set(frame.get_representation_component_names("s")))
        )

        comp_kwargs = {}
        for comp_name in representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
            # note that the "_" must be in there explicitly, because
            # "alphanumeric" usually includes underscores.
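            # For comp_name = 'ra', for example, the combined pattern built
            # below accepts 'ra', 'RA[J2000]', and 'center_ra', but rejects
            # 'rad' and 'aura' (illustrative cases only).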
            starts_with_comp = comp_name + r'(\W|\b|_)'
            # this part matches stuff like 'center_ra', but *not*
            # 'aura'
            ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
            # the final regex ORs together the two patterns
            rex = re.compile(rf"({starts_with_comp})|({ends_with_comp})",
                             re.IGNORECASE | re.UNICODE)

            # find all matches
            matches = {col_name for col_name in table.colnames
                       if rex.match(col_name)}

            # now need to select among matches, also making sure we don't have
            # an exact match with another component
            if len(matches) == 0:  # no matches
                continue
            elif len(matches) == 1:  # only one match
                col_name = matches.pop()
            else:  # more than 1 match
                # try to sieve out other components
                matches -= representation_component_names - {comp_name}
                # if there's only one remaining match, it worked.
                if len(matches) == 1:
                    col_name = matches.pop()
                else:
                    raise ValueError(
                        'Found at least two matches for component '
                        f'"{comp_name}": "{matches}". Cannot guess coordinates '
                        'from a table with this ambiguity.')

            comp_kwargs[comp_name] = table[col_name]

        for k, v in comp_kwargs.items():
            if k in coord_kwargs:
                raise ValueError('Found column "{}" in table, but it was '
                                 'already provided as "{}" keyword to '
                                 'guess_from_table function.'.format(v.name, k))
            else:
                coord_kwargs[k] = v

        return cls(**coord_kwargs)

    # Name resolve
    @classmethod
    def from_name(cls, name, frame='icrs', parse=False, cache=True):
        """
        Given a name, query the CDS name resolver to attempt to retrieve
        coordinate information for that object. The search database, sesame
        url, and query timeout can be set through configuration items in
        ``astropy.coordinates.name_resolve`` -- see docstring for
        `~astropy.coordinates.get_icrs_coordinates` for more
        information.

        Parameters
        ----------
        name : str
            The name of the object to get coordinates for, e.g. ``'M42'``.
        frame : str or `BaseCoordinateFrame` class or instance
            The frame to transform the object to.
        parse : bool
            Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For catalog object names that have
            J-coordinates embedded in them, e.g.,
            'CRTS SSS100805 J194428-420209', this may be much faster than a
            Sesame query for the same object name. The coordinates extracted
            in this way may differ from the database coordinates by a few
            deci-arcseconds, so only use this option if you do not need
            sub-arcsecond accuracy for coordinates.
        cache : bool, optional
            Determines whether to cache the results or not. To update or
            overwrite an existing value, pass ``cache='update'``.

        Returns
        -------
        coord : SkyCoord
            Instance of the SkyCoord class.
        """
        from .name_resolve import get_icrs_coordinates

        icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
        icrs_sky_coord = cls(icrs_coord)
        if frame in ('icrs', icrs_coord.__class__):
            return icrs_sky_coord
        else:
            return icrs_sky_coord.transform_to(frame)
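
# A minimal usage sketch for the name resolver above (requires network access
# to the Sesame service; the object name is illustrative):
#
#     >>> from astropy.coordinates import SkyCoord
#     >>> m42 = SkyCoord.from_name('M42')  # doctest: +SKIP
#     >>> m42_gal = SkyCoord.from_name('M42', frame='galactic')  # doctest: +SKIP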
""" In this module, we define the coordinate representation classes, which are used to represent low-level cartesian, spherical, cylindrical, and other coordinates. """ import abc import functools import operator import inspect import warnings import numpy as np import astropy.units as u from erfa import ufunc as erfa_ufunc from .angles import Angle, Longitude, Latitude from .distances import Distance from .matrix_utilities import is_O3 from astropy.utils import ShapedLikeNDArray, classproperty from astropy.utils.data_info import MixinInfo from astropy.utils.exceptions import DuplicateRepresentationWarning __all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation", "CartesianRepresentation", "SphericalRepresentation", "UnitSphericalRepresentation", "RadialRepresentation", "PhysicsSphericalRepresentation", "CylindricalRepresentation", "BaseDifferential", "CartesianDifferential", "BaseSphericalDifferential", "BaseSphericalCosLatDifferential", "SphericalDifferential", "SphericalCosLatDifferential", "UnitSphericalDifferential", "UnitSphericalCosLatDifferential", "RadialDifferential", "CylindricalDifferential", "PhysicsSphericalDifferential"] # Module-level dict mapping representation string alias names to classes. # This is populated by __init_subclass__ when called by Representation or # Differential classes so that they are all registered automatically. REPRESENTATION_CLASSES = {} DIFFERENTIAL_CLASSES = {} # set for tracking duplicates DUPLICATE_REPRESENTATIONS = set() # a hash for the content of the above two dicts, cached for speed. _REPRDIFF_HASH = None def _fqn_class(cls): ''' Get the fully qualified name of a class ''' return cls.__module__ + '.' + cls.__qualname__ def get_reprdiff_cls_hash(): """ Returns a hash value that should be invariable if the `REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not changed. """ global _REPRDIFF_HASH if _REPRDIFF_HASH is None: _REPRDIFF_HASH = (hash(tuple(REPRESENTATION_CLASSES.items())) + hash(tuple(DIFFERENTIAL_CLASSES.items()))) return _REPRDIFF_HASH def _invalidate_reprdiff_cls_hash(): global _REPRDIFF_HASH _REPRDIFF_HASH = None def _array2string(values, prefix=''): # Work around version differences for array2string. kwargs = {'separator': ', ', 'prefix': prefix} kwargs['formatter'] = {} return np.array2string(values, **kwargs) class BaseRepresentationOrDifferentialInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attrs_from_parent = {'unit'} # Indicates unit is read-only _supports_indexing = False @staticmethod def default_format(val): # Create numpy dtype so that numpy formatting will work. components = val.components values = tuple(getattr(val, component).value for component in components) a = np.empty(getattr(val, 'shape', ()), [(component, value.dtype) for component, value in zip(components, values)]) for component, value in zip(components, values): a[component] = value return str(a) @property def _represent_as_dict_attrs(self): return self._parent.components @property def unit(self): if self._parent is None: return None unit = self._parent._unitstr return unit[1:-1] if unit.startswith('(') else unit def new_like(self, reps, length, metadata_conflicts='warn', name=None): """ Return a new instance like ``reps`` with ``length`` rows. 
        This is intended for creating an empty column object whose elements
        can be set in-place for table operations like join or vstack.

        Parameters
        ----------
        reps : list
            List of input representations or differentials.
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name

        Returns
        -------
        col : `BaseRepresentation` or `BaseDifferential` subclass instance
            Empty instance of this class consistent with ``reps``
        """
        # Get merged info attributes like shape, dtype, format, description, etc.
        attrs = self.merge_cols_attributes(reps, metadata_conflicts, name,
                                           ('meta', 'description'))
        # Make a new representation or differential with the desired length
        # using the _apply / __getitem__ machinery to effectively return
        # rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
        # include possible differentials.
        indexes = np.zeros(length, dtype=np.int64)
        out = reps[0][indexes]

        # Use __setitem__ machinery to check whether all representations
        # can represent themselves as this one without loss of information.
        for rep in reps[1:]:
            try:
                out[0] = rep[0]
            except Exception as err:
                raise ValueError('Input representations are inconsistent.') from err

        # Set (merged) info attributes.
        for attr in ('name', 'meta', 'description'):
            if attr in attrs:
                setattr(out.info, attr, attrs[attr])

        return out


class BaseRepresentationOrDifferential(ShapedLikeNDArray):
    """3D coordinate representations and differentials.

    Parameters
    ----------
    comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
        The components of the 3D point or differential. The names are the
        keys and the subclasses the values of the ``attr_classes`` attribute.
    copy : bool, optional
        If `True` (default), arrays will be copied; if `False`, they will be
        broadcast together but not use new memory.
    """

    # Ensure multiplication/division with ndarray or Quantity doesn't lead to
    # object arrays.
    __array_priority__ = 50000

    info = BaseRepresentationOrDifferentialInfo()

    def __init__(self, *args, **kwargs):
        # make argument a list, so we can pop them off.
        args = list(args)
        components = self.components
        if (args and isinstance(args[0], self.__class__)
                and all(arg is None for arg in args[1:])):
            rep_or_diff = args[0]
            copy = kwargs.pop('copy', True)
            attrs = [getattr(rep_or_diff, component)
                     for component in components]
            if 'info' in rep_or_diff.__dict__:
                self.info = rep_or_diff.info

            if kwargs:
                raise TypeError(f'unexpected keyword arguments for case '
                                f'where class instance is passed in: {kwargs}')
        else:
            attrs = []
            for component in components:
                try:
                    attr = args.pop(0) if args else kwargs.pop(component)
                except KeyError:
                    raise TypeError(f'__init__() missing 1 required positional '
                                    f'argument: {component!r}') from None

                if attr is None:
                    raise TypeError(f'__init__() missing 1 required positional '
                                    f'argument: {component!r} (or first '
                                    f'argument should be an instance of '
                                    f'{self.__class__.__name__}).')

                attrs.append(attr)

            copy = args.pop(0) if args else kwargs.pop('copy', True)

            if args:
                raise TypeError(f'unexpected arguments: {args}')

            if kwargs:
                for component in components:
                    if component in kwargs:
                        raise TypeError(f"__init__() got multiple values for "
                                        f"argument {component!r}")

                raise TypeError(f'unexpected keyword arguments: {kwargs}')

        # Pass attributes through the required initializing classes.
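        # (For SphericalRepresentation, e.g., this turns the 'lon' input into
        # a Longitude and the 'lat' input into a Latitude.)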
attrs = [self.attr_classes[component](attr, copy=copy, subok=True) for component, attr in zip(components, attrs)] try: bc_attrs = np.broadcast_arrays(*attrs, subok=True) except ValueError as err: if len(components) <= 2: c_str = ' and '.join(components) else: c_str = ', '.join(components[:2]) + ', and ' + components[2] raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err # The output of np.broadcast_arrays() has limitations on writeability, so we perform # additional handling to enable writeability in most situations. This is primarily # relevant for allowing the changing of the wrap angle of longitude components. # # If the shape has changed for a given component, broadcasting is needed: # If copy=True, we make a copy of the broadcasted array to ensure writeability. # Note that array had already been copied prior to the broadcasting. # TODO: Find a way to avoid the double copy. # If copy=False, we use the broadcasted array, and writeability may still be # limited. # If the shape has not changed for a given component, we can proceed with using the # non-broadcasted array, which avoids writeability issues from np.broadcast_arrays(). attrs = [(bc_attr.copy() if copy else bc_attr) if bc_attr.shape != attr.shape else attr for attr, bc_attr in zip(attrs, bc_attrs)] # Set private attributes for the attributes. (If not defined explicitly # on the class, the metaclass will define properties to access these.) for component, attr in zip(components, attrs): setattr(self, '_' + component, attr) @classmethod def get_name(cls): """Name of the representation or differential. In lower case, with any trailing 'representation' or 'differential' removed. (E.g., 'spherical' for `~astropy.coordinates.SphericalRepresentation` or `~astropy.coordinates.SphericalDifferential`.) """ name = cls.__name__.lower() if name.endswith('representation'): name = name[:-14] elif name.endswith('differential'): name = name[:-12] return name # The two methods that any subclass has to define. @classmethod @abc.abstractmethod def from_cartesian(cls, other): """Create a representation of this class from a supplied Cartesian one. Parameters ---------- other : `CartesianRepresentation` The representation to turn into this class Returns ------- representation : `BaseRepresentation` subclass instance A new representation of this class's type. """ # Note: the above docstring gets overridden for differentials. raise NotImplementedError() @abc.abstractmethod def to_cartesian(self): """Convert the representation to its Cartesian form. Note that any differentials get dropped. Also note that orientation information at the origin is *not* preserved by conversions through Cartesian coordinates. For example, transforming an angular position defined at distance=0 through cartesian coordinates and back will lose the original angular coordinates:: >>> import astropy.units as u >>> import astropy.coordinates as coord >>> rep = coord.SphericalRepresentation( ... lon=15*u.deg, ... lat=-11*u.deg, ... distance=0*u.pc) >>> rep.to_cartesian().represent_as(coord.SphericalRepresentation) <SphericalRepresentation (lon, lat, distance) in (rad, rad, pc) (0., 0., 0.)> Returns ------- cartrepr : `CartesianRepresentation` The representation in Cartesian form. """ # Note: the above docstring gets overridden for differentials. 
raise NotImplementedError() @property def components(self): """A tuple with the in-order names of the coordinate components.""" return tuple(self.attr_classes) def __eq__(self, value): """Equality operator This implements strict equality and requires that the representation classes are identical and that the representation data are exactly equal. """ if self.__class__ is not value.__class__: raise TypeError(f'cannot compare: objects must have same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') try: np.broadcast(self, value) except ValueError as exc: raise ValueError(f'cannot compare: {exc}') from exc out = True for comp in self.components: out &= (getattr(self, '_' + comp) == getattr(value, '_' + comp)) return out def __ne__(self, value): return np.logical_not(self == value) def _apply(self, method, *args, **kwargs): """Create a new representation or differential with ``method`` applied to the component data. In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), with the results used to create a new instance. Internally, it is also used to apply functions to the components (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args : tuple Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. """ if callable(method): apply_method = lambda array: method(array, *args, **kwargs) else: apply_method = operator.methodcaller(method, *args, **kwargs) new = super().__new__(self.__class__) for component in self.components: setattr(new, '_' + component, apply_method(getattr(self, component))) # Copy other 'info' attr only if it has actually been defined. # See PR #3898 for further explanation and justification, along # with Quantity.__array_finalize__ if 'info' in self.__dict__: new.info = self.info return new def __setitem__(self, item, value): if value.__class__ is not self.__class__: raise TypeError(f'can only set from object of same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') for component in self.components: getattr(self, '_' + component)[item] = getattr(value, '_' + component) @property def shape(self): """The shape of the instance and underlying arrays. Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a tuple. Note that if different instances share some but not all underlying data, setting the shape of one instance can make the other instance unusable. Hence, it is strongly recommended to get new, reshaped instances with the ``reshape`` method. Raises ------ ValueError If the new shape has the wrong total number of elements. AttributeError If the shape of any of the components cannot be changed without the arrays being copied. For these cases, use the ``reshape`` method (which copies any arrays that cannot be reshaped in-place). """ return getattr(self, self.components[0]).shape @shape.setter def shape(self, shape): # We keep track of arrays that were already reshaped since we may have # to return those to their original shape if a later shape-setting # fails. 
(This can happen since coordinates are broadcast together.) reshaped = [] oldshape = self.shape for component in self.components: val = getattr(self, component) if val.size > 1: try: val.shape = shape except Exception: for val2 in reshaped: val2.shape = oldshape raise else: reshaped.append(val) # Required to support multiplication and division, and defined by the base # representation and differential classes. @abc.abstractmethod def _scale_operation(self, op, *args): raise NotImplementedError() def __mul__(self, other): return self._scale_operation(operator.mul, other) def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): return self._scale_operation(operator.truediv, other) def __neg__(self): return self._scale_operation(operator.neg) # Follow numpy convention and make an independent copy. def __pos__(self): return self.copy() # Required to support addition and subtraction, and defined by the base # representation and differential classes. @abc.abstractmethod def _combine_operation(self, op, other, reverse=False): raise NotImplementedError() def __add__(self, other): return self._combine_operation(operator.add, other) def __radd__(self, other): return self._combine_operation(operator.add, other, reverse=True) def __sub__(self, other): return self._combine_operation(operator.sub, other) def __rsub__(self, other): return self._combine_operation(operator.sub, other, reverse=True) # The following are used for repr and str @property def _values(self): """Turn the coordinates into a record array with the coordinate values. The record array fields will have the component names. """ coo_items = [(c, getattr(self, c)) for c in self.components] result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items]) for c, coo in coo_items: result[c] = coo.value return result @property def _units(self): """Return a dictionary with the units of the coordinate components.""" return dict([(component, getattr(self, component).unit) for component in self.components]) @property def _unitstr(self): units_set = set(self._units.values()) if len(units_set) == 1: unitstr = units_set.pop().to_string() else: unitstr = '({})'.format( ', '.join([self._units[component].to_string() for component in self.components])) return unitstr def __str__(self): return f'{_array2string(self._values)} {self._unitstr:s}' def __repr__(self): prefixstr = ' ' arrstr = _array2string(self._values, prefix=prefixstr) diffstr = '' if getattr(self, 'differentials', None): diffstr = '\n (has differentials w.r.t.: {})'.format( ', '.join([repr(key) for key in self.differentials.keys()])) unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]' return '<{} ({}) {:s}\n{}{}{}>'.format( self.__class__.__name__, ', '.join(self.components), unitstr, prefixstr, arrstr, diffstr) def _make_getter(component): """Make an attribute getter for use in a property. Parameters ---------- component : str The name of the component that should be accessed. This assumes the actual value is stored in an attribute of that name prefixed by '_'. """ # This has to be done in a function to ensure the reference to component # is not lost/redirected. 
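    # (Defining the getter inline in a loop would instead close over the loop
    # variable itself, so every property would read the *last* component --
    # the classic late-binding closure pitfall.)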
    component = '_' + component

    def get_component(self):
        return getattr(self, component)

    return get_component


class RepresentationInfo(BaseRepresentationOrDifferentialInfo):

    @property
    def _represent_as_dict_attrs(self):
        attrs = super()._represent_as_dict_attrs
        if self._parent._differentials:
            attrs += ('differentials',)
        return attrs

    def _represent_as_dict(self, attrs=None):
        out = super()._represent_as_dict(attrs)
        for key, value in out.pop('differentials', {}).items():
            out[f'differentials.{key}'] = value
        return out

    def _construct_from_dict(self, map):
        differentials = {}
        for key in list(map.keys()):
            if key.startswith('differentials.'):
                differentials[key[14:]] = map.pop(key)
        map['differentials'] = differentials
        return super()._construct_from_dict(map)


class BaseRepresentation(BaseRepresentationOrDifferential):
    """Base for representing a point in a 3D coordinate system.

    Parameters
    ----------
    comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
        The components of the 3D points. The names are the keys and the
        subclasses the values of the ``attr_classes`` attribute.
    differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.BaseDifferential` subclass instance, or a
        dictionary with keys set to a string representation of the SI unit
        with which the differential (derivative) is taken. For example, for
        a velocity differential on a positional representation, the key would
        be ``'s'`` for seconds, indicating that the derivative is a time
        derivative.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.

    Notes
    -----
    All representation classes should subclass this base representation class,
    and define an ``attr_classes`` attribute, a `dict` which maps component
    names to the class that creates them. They must also define a
    ``to_cartesian`` method and a ``from_cartesian`` class method. By default,
    transformations are done via the cartesian system, but classes that want
    to define a smarter transformation path can overload the ``represent_as``
    method. If one wants to use an associated differential class, one should
    also define ``unit_vectors`` and ``scale_factors`` methods (see those
    methods for details).
    """

    info = RepresentationInfo()

    def __init_subclass__(cls, **kwargs):
        # Register representation name (except for BaseRepresentation)
        if cls.__name__ == 'BaseRepresentation':
            return

        if not hasattr(cls, 'attr_classes'):
            raise NotImplementedError('Representations must have an '
                                      '"attr_classes" class attribute.')

        repr_name = cls.get_name()
        # first time a duplicate is added
        # remove first entry and add both using their qualnames
        if repr_name in REPRESENTATION_CLASSES:
            DUPLICATE_REPRESENTATIONS.add(repr_name)

            fqn_cls = _fqn_class(cls)
            existing = REPRESENTATION_CLASSES[repr_name]
            fqn_existing = _fqn_class(existing)

            if fqn_cls == fqn_existing:
                raise ValueError(f'Representation "{fqn_cls}" already defined')

            msg = (
                f'Representation "{repr_name}" already defined, removing it to avoid confusion. '
f'Use qualnames "{fqn_cls}" and "{fqn_existing}" or class instances directly' ) warnings.warn(msg, DuplicateRepresentationWarning) del REPRESENTATION_CLASSES[repr_name] REPRESENTATION_CLASSES[fqn_existing] = existing repr_name = fqn_cls # further definitions with the same name, just add qualname elif repr_name in DUPLICATE_REPRESENTATIONS: fqn_cls = _fqn_class(cls) warnings.warn(f'Representation "{repr_name}" already defined, using qualname ' f'"{fqn_cls}".') repr_name = fqn_cls if repr_name in REPRESENTATION_CLASSES: raise ValueError( f'Representation "{repr_name}" already defined' ) REPRESENTATION_CLASSES[repr_name] = cls _invalidate_reprdiff_cls_hash() # define getters for any component that does not yet have one. for component in cls.attr_classes: if not hasattr(cls, component): setattr(cls, component, property(_make_getter(component), doc=f"The '{component}' component of the points(s).")) super().__init_subclass__(**kwargs) def __init__(self, *args, differentials=None, **kwargs): # Handle any differentials passed in. super().__init__(*args, **kwargs) if (differentials is None and args and isinstance(args[0], self.__class__)): differentials = args[0]._differentials self._differentials = self._validate_differentials(differentials) def _validate_differentials(self, differentials): """ Validate that the provided differentials are appropriate for this representation and recast/reshape as necessary and then return. Note that this does *not* set the differentials on ``self._differentials``, but rather leaves that for the caller. """ # Now handle the actual validation of any specified differential classes if differentials is None: differentials = dict() elif isinstance(differentials, BaseDifferential): # We can't handle auto-determining the key for this combo if (isinstance(differentials, RadialDifferential) and isinstance(self, UnitSphericalRepresentation)): raise ValueError("To attach a RadialDifferential to a " "UnitSphericalRepresentation, you must supply " "a dictionary with an appropriate key.") key = differentials._get_deriv_key(self) differentials = {key: differentials} for key in differentials: try: diff = differentials[key] except TypeError as err: raise TypeError("'differentials' argument must be a " "dictionary-like object") from err diff._check_base(self) if (isinstance(diff, RadialDifferential) and isinstance(self, UnitSphericalRepresentation)): # We trust the passing of a key for a RadialDifferential # attached to a UnitSphericalRepresentation because it will not # have a paired component name (UnitSphericalRepresentation has # no .distance) to automatically determine the expected key pass else: expected_key = diff._get_deriv_key(self) if key != expected_key: raise ValueError("For differential object '{}', expected " "unit key = '{}' but received key = '{}'" .format(repr(diff), expected_key, key)) # For now, we are very rigid: differentials must have the same shape # as the representation. This makes it easier to handle __getitem__ # and any other shape-changing operations on representations that # have associated differentials if diff.shape != self.shape: # TODO: message of IncompatibleShapeError is not customizable, # so use a valueerror instead? 
raise ValueError("Shape of differentials must be the same " "as the shape of the representation ({} vs " "{})".format(diff.shape, self.shape)) return differentials def _raise_if_has_differentials(self, op_name): """ Used to raise a consistent exception for any operation that is not supported when a representation has differentials attached. """ if self.differentials: raise TypeError("Operation '{}' is not supported when " "differentials are attached to a {}." .format(op_name, self.__class__.__name__)) @classproperty def _compatible_differentials(cls): return [DIFFERENTIAL_CLASSES[cls.get_name()]] @property def differentials(self): """A dictionary of differential class instances. The keys of this dictionary must be a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. """ return self._differentials # We do not make unit_vectors and scale_factors abstract methods, since # they are only necessary if one also defines an associated Differential. # Also, doing so would break pre-differential representation subclasses. def unit_vectors(self): r"""Cartesian unit vectors in the direction of each component. Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, a change in one component of :math:`\delta c` corresponds to a change in representation of :math:`\delta c \times f_c \times \hat{e}_c`. Returns ------- unit_vectors : dict of `CartesianRepresentation` The keys are the component names. """ raise NotImplementedError(f"{type(self)} has not implemented unit vectors") def scale_factors(self): r"""Scale factors for each component's direction. Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, a change in one component of :math:`\delta c` corresponds to a change in representation of :math:`\delta c \times f_c \times \hat{e}_c`. Returns ------- scale_factors : dict of `~astropy.units.Quantity` The keys are the component names. """ raise NotImplementedError(f"{type(self)} has not implemented scale factors.") def _re_represent_differentials(self, new_rep, differential_class): """Re-represent the differentials to the specified classes. This returns a new dictionary with the same keys but with the attached differentials converted to the new differential classes. """ if differential_class is None: return dict() if not self.differentials and differential_class: raise ValueError("No differentials associated with this " "representation!") elif (len(self.differentials) == 1 and inspect.isclass(differential_class) and issubclass(differential_class, BaseDifferential)): # TODO: is there a better way to do this? 
differential_class = { list(self.differentials.keys())[0]: differential_class } elif differential_class.keys() != self.differentials.keys(): raise ValueError("Desired differential classes must be passed in " "as a dictionary with keys equal to a string " "representation of the unit of the derivative " "for each differential stored with this " "representation object ({0})" .format(self.differentials)) new_diffs = dict() for k in self.differentials: diff = self.differentials[k] try: new_diffs[k] = diff.represent_as(differential_class[k], base=self) except Exception as err: if (differential_class[k] not in new_rep._compatible_differentials): raise TypeError("Desired differential class {} is not " "compatible with the desired " "representation class {}" .format(differential_class[k], new_rep.__class__)) from err else: raise return new_diffs def represent_as(self, other_class, differential_class=None): """Convert coordinates to another representation. If the instance is of the requested class, it is returned unmodified. By default, conversion is done via Cartesian coordinates. Also note that orientation information at the origin is *not* preserved by conversions through Cartesian coordinates. See the docstring for :meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian` for an example. Parameters ---------- other_class : `~astropy.coordinates.BaseRepresentation` subclass The type of representation to turn the coordinates into. differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional Classes in which the differentials should be represented. Can be a single class if only a single differential is attached, otherwise it should be a `dict` keyed by the same keys as the differentials. """ if other_class is self.__class__ and not differential_class: return self.without_differentials() else: if isinstance(other_class, str): raise ValueError("Input to a representation's represent_as " "must be a class, not a string. For " "strings, use frame objects") if other_class is not self.__class__: # The default is to convert via cartesian coordinates new_rep = other_class.from_cartesian(self.to_cartesian()) else: new_rep = self new_rep._differentials = self._re_represent_differentials( new_rep, differential_class) return new_rep def transform(self, matrix): """Transform coordinates using a 3x3 matrix in a Cartesian basis. This returns a new representation and does not modify the original one. Any differentials attached to this representation will also be transformed. Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. """ # route transformation through Cartesian difs_cls = {k: CartesianDifferential for k in self.differentials.keys()} crep = self.represent_as(CartesianRepresentation, differential_class=difs_cls ).transform(matrix) # move back to original representation difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()} rep = crep.represent_as(self.__class__, difs_cls) return rep def with_differentials(self, differentials): """ Create a new representation with the same positions as this representation, but with these new differentials. Differential keys that already exist in this object's differential dict are overwritten. Parameters ---------- differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance The differentials for the new representation to have. 
Returns ------- `~astropy.coordinates.BaseRepresentation` subclass instance A copy of this representation, but with the ``differentials`` as its differentials. """ if not differentials: return self args = [getattr(self, component) for component in self.components] # We shallow copy the differentials dictionary so we don't update the # current object's dictionary when adding new keys new_rep = self.__class__(*args, differentials=self.differentials.copy(), copy=False) new_rep._differentials.update( new_rep._validate_differentials(differentials)) return new_rep def without_differentials(self): """Return a copy of the representation without attached differentials. Returns ------- `~astropy.coordinates.BaseRepresentation` subclass instance A shallow copy of this representation, without any differentials. If no differentials were present, no copy is made. """ if not self._differentials: return self args = [getattr(self, component) for component in self.components] return self.__class__(*args, copy=False) @classmethod def from_representation(cls, representation): """Create a new instance of this representation from another one. Parameters ---------- representation : `~astropy.coordinates.BaseRepresentation` instance The presentation that should be converted to this class. """ return representation.represent_as(cls) def __eq__(self, value): """Equality operator for BaseRepresentation This implements strict equality and requires that the representation classes are identical, the differentials are identical, and that the representation data are exactly equal. """ # BaseRepresentationOrDifferental (checks classes and compares components) out = super().__eq__(value) # super() checks that the class is identical so can this even happen? # (same class, different differentials ?) if self._differentials.keys() != value._differentials.keys(): raise ValueError(f'cannot compare: objects must have same differentials') for self_diff, value_diff in zip(self._differentials.values(), value._differentials.values()): out &= (self_diff == value_diff) return out def __ne__(self, value): return np.logical_not(self == value) def _apply(self, method, *args, **kwargs): """Create a new representation with ``method`` applied to the component data. This is not a simple inherit from ``BaseRepresentationOrDifferential`` because we need to call ``._apply()`` on any associated differential classes. See docstring for `BaseRepresentationOrDifferential._apply`. Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args : tuple Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. 
""" rep = super()._apply(method, *args, **kwargs) rep._differentials = dict( [(k, diff._apply(method, *args, **kwargs)) for k, diff in self._differentials.items()]) return rep def __setitem__(self, item, value): if not isinstance(value, BaseRepresentation): raise TypeError(f'value must be a representation instance, ' f'not {type(value)}.') if not (isinstance(value, self.__class__) or len(value.attr_classes) == len(self.attr_classes)): raise ValueError( f'value must be representable as {self.__class__.__name__} ' f'without loss of information.') diff_classes = {} if self._differentials: if self._differentials.keys() != value._differentials.keys(): raise ValueError('value must have the same differentials.') for key, self_diff in self._differentials.items(): diff_classes[key] = self_diff_cls = self_diff.__class__ value_diff_cls = value._differentials[key].__class__ if not (isinstance(value_diff_cls, self_diff_cls) or (len(value_diff_cls.attr_classes) == len(self_diff_cls.attr_classes))): raise ValueError( f'value differential {key!r} must be representable as ' f'{self_diff.__class__.__name__} without loss of information.') value = value.represent_as(self.__class__, diff_classes) super().__setitem__(item, value) for key, differential in self._differentials.items(): differential[item] = value._differentials[key] def _scale_operation(self, op, *args): """Scale all non-angular components, leaving angular ones unchanged. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc. *args Any arguments required for the operator (typically, what is to be multiplied with, divided by). """ results = [] for component, cls in self.attr_classes.items(): value = getattr(self, component) if issubclass(cls, Angle): results.append(value) else: results.append(op(value, *args)) # try/except catches anything that cannot initialize the class, such # as operations that returned NotImplemented or a representation # instead of a quantity (as would happen for, e.g., rep * rep). try: result = self.__class__(*results) except Exception: return NotImplemented for key, differential in self.differentials.items(): diff_result = differential._scale_operation(op, *args, scaled_base=True) result.differentials[key] = diff_result return result def _combine_operation(self, op, other, reverse=False): """Combine two representation. By default, operate on the cartesian representations of both. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc. other : `~astropy.coordinates.BaseRepresentation` subclass instance The other representation. reverse : bool Whether the operands should be reversed (e.g., as we got here via ``self.__rsub__`` because ``self`` is a subclass of ``other``). 
""" self._raise_if_has_differentials(op.__name__) result = self.to_cartesian()._combine_operation(op, other, reverse) if result is NotImplemented: return NotImplemented else: return self.from_cartesian(result) # We need to override this setter to support differentials @BaseRepresentationOrDifferential.shape.setter def shape(self, shape): orig_shape = self.shape # See: https://stackoverflow.com/questions/3336767/ for an example BaseRepresentationOrDifferential.shape.fset(self, shape) # also try to perform shape-setting on any associated differentials try: for k in self.differentials: self.differentials[k].shape = shape except Exception: BaseRepresentationOrDifferential.shape.fset(self, orig_shape) for k in self.differentials: self.differentials[k].shape = orig_shape raise def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. Note that any associated differentials will be dropped during this operation. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.sqrt(functools.reduce( operator.add, (getattr(self, component)**2 for component, cls in self.attr_classes.items() if not issubclass(cls, Angle)))) def mean(self, *args, **kwargs): """Vector mean. Averaging is done by converting the representation to cartesian, and taking the mean of the x, y, and z components. The result is converted back to the same representation as the input. Refer to `~numpy.mean` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. Returns ------- mean : `~astropy.coordinates.BaseRepresentation` subclass instance Vector mean, in the same representation as that of the input. """ self._raise_if_has_differentials('mean') return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs)) def sum(self, *args, **kwargs): """Vector sum. Adding is done by converting the representation to cartesian, and summing the x, y, and z components. The result is converted back to the same representation as the input. Refer to `~numpy.sum` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. Returns ------- sum : `~astropy.coordinates.BaseRepresentation` subclass instance Vector sum, in the same representation as that of the input. """ self._raise_if_has_differentials('sum') return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs)) def dot(self, other): """Dot product of two representations. The calculation is done by converting both ``self`` and ``other`` to `~astropy.coordinates.CartesianRepresentation`. Note that any associated differentials will be dropped during this operation. Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` The representation to take the dot product with. Returns ------- dot_product : `~astropy.units.Quantity` The sum of the product of the x, y, and z components of the cartesian representations of ``self`` and ``other``. """ return self.to_cartesian().dot(other) def cross(self, other): """Vector cross product of two representations. The calculation is done by converting both ``self`` and ``other`` to `~astropy.coordinates.CartesianRepresentation`, and converting the result back to the type of representation of ``self``. 
        Parameters
        ----------
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The representation to take the cross product with.

        Returns
        -------
        cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
            With vectors perpendicular to both ``self`` and ``other``, in the
            same type of representation as ``self``.
        """
        self._raise_if_has_differentials('cross')
        return self.from_cartesian(self.to_cartesian().cross(other))


class CartesianRepresentation(BaseRepresentation):
    """
    Representation of points in 3D cartesian coordinates.

    Parameters
    ----------
    x, y, z : `~astropy.units.Quantity` or array
        The x, y, and z coordinates of the point(s). If ``x``, ``y``, and
        ``z`` have different shapes, they should be broadcastable. If not
        quantity, ``unit`` should be set.  If only ``x`` is given, it is
        assumed that it contains an array with the 3 coordinates stored
        along ``xyz_axis``.
    unit : unit-like
        If given, the coordinates will be converted to this unit (or taken
        to be in this unit if not given).
    xyz_axis : int, optional
        The axis along which the coordinates are stored when a single array
        is provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).

    differentials : dict, `CartesianDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `CartesianDifferential` instance, or a dictionary of
        `CartesianDifferential` s with keys set to a string representation of
        the SI unit with which the differential (derivative) is taken. For
        example, for a velocity differential on a positional representation,
        the key would be ``'s'`` for seconds, indicating that the derivative
        is a time derivative.

    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """

    attr_classes = {'x': u.Quantity,
                    'y': u.Quantity,
                    'z': u.Quantity}

    _xyz = None

    def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None,
                 differentials=None, copy=True):

        if y is None and z is None:
            if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV':
                # Short-cut for 3-D array input.
                x = u.Quantity(x, unit, copy=copy, subok=True)
                # Keep a link to the array with all three coordinates
                # so that we can return it quickly if needed in get_xyz.
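                # (The unpacking below yields views into this array, so the
                # fast path does not copy the data a second time.)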
                self._xyz = x
                if xyz_axis:
                    x = np.moveaxis(x, xyz_axis, 0)
                    self._xyz_axis = xyz_axis
                else:
                    self._xyz_axis = 0

                self._x, self._y, self._z = x
                self._differentials = self._validate_differentials(differentials)
                return

            elif (isinstance(x, CartesianRepresentation)
                  and unit is None and xyz_axis is None):
                if differentials is None:
                    differentials = x._differentials

                return super().__init__(x, differentials=differentials,
                                        copy=copy)

            else:
                x, y, z = x

        if xyz_axis is not None:
            raise ValueError("xyz_axis should only be set if x, y, and z are "
                             "in a single array passed in through x, "
                             "i.e., y and z should not be given.")

        if y is None or z is None:
            raise ValueError("x, y, and z are required to instantiate {}"
                             .format(self.__class__.__name__))

        if unit is not None:
            x = u.Quantity(x, unit, copy=copy, subok=True)
            y = u.Quantity(y, unit, copy=copy, subok=True)
            z = u.Quantity(z, unit, copy=copy, subok=True)
            copy = False

        super().__init__(x, y, z, copy=copy, differentials=differentials)
        if not (self._x.unit.is_equivalent(self._y.unit) and
                self._x.unit.is_equivalent(self._z.unit)):
            raise u.UnitsError("x, y, and z should have matching physical types")

    def unit_vectors(self):
        l = np.broadcast_to(1.*u.one, self.shape, subok=True)
        o = np.broadcast_to(0.*u.one, self.shape, subok=True)
        return {
            'x': CartesianRepresentation(l, o, o, copy=False),
            'y': CartesianRepresentation(o, l, o, copy=False),
            'z': CartesianRepresentation(o, o, l, copy=False)}

    def scale_factors(self):
        l = np.broadcast_to(1.*u.one, self.shape, subok=True)
        return {'x': l, 'y': l, 'z': l}

    def get_xyz(self, xyz_axis=0):
        """Return a vector array of the x, y, and z coordinates.

        Parameters
        ----------
        xyz_axis : int, optional
            The axis in the final array along which the x, y, z components
            should be stored (default: 0).

        Returns
        -------
        xyz : `~astropy.units.Quantity`
            With dimension 3 along ``xyz_axis``.  Note that, if possible,
            this will be a view.
        """
        if self._xyz is not None:
            if self._xyz_axis == xyz_axis:
                return self._xyz
            else:
                return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)

        # Create combined array.  TO DO: keep it in _xyz for repeated use?
        # But then in-place changes have to cancel it. Likely best to
        # also update components.
        return np.stack([self._x, self._y, self._z], axis=xyz_axis)

    xyz = property(get_xyz)

    @classmethod
    def from_cartesian(cls, other):
        return other

    def to_cartesian(self):
        return self

    def transform(self, matrix):
        """
        Transform the cartesian coordinates using a 3x3 matrix.

        This returns a new representation and does not modify the original one.
        Any differentials attached to this representation will also be
        transformed.

        Parameters
        ----------
        matrix : ndarray
            A 3x3 transformation matrix, such as a rotation matrix.

        Examples
        --------
        We can start off by creating a cartesian representation object:

        >>> from astropy import units as u
        >>> from astropy.coordinates import CartesianRepresentation
        >>> rep = CartesianRepresentation([1, 2] * u.pc,
        ...                               [2, 3] * u.pc,
        ...                               [3, 4] * u.pc)

        We now create a rotation matrix around the z axis:

        >>> from astropy.coordinates.matrix_utilities import rotation_matrix
        >>> rotation = rotation_matrix(30 * u.deg, axis='z')

        Finally, we can apply this transformation:

        >>> rep_new = rep.transform(rotation)
        >>> rep_new.xyz  # doctest: +FLOAT_CMP
        <Quantity [[ 1.8660254 ,  3.23205081],
                   [ 1.23205081,  1.59807621],
                   [ 3.        ,  4.        ]] pc>
        """
        # erfa rxp: Multiply a p-vector by an r-matrix.
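        # Stacking (x, y, z) along the last axis lets the multiplication
        # broadcast over any leading (stack) dimensions of ``matrix``.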
        p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
        # transformed representation
        rep = self.__class__(p, xyz_axis=-1, copy=False)

        # Handle differentials attached to this representation
        new_diffs = dict((k, d.transform(matrix, self, rep))
                         for k, d in self.differentials.items())
        return rep.with_differentials(new_diffs)

    def _combine_operation(self, op, other, reverse=False):
        self._raise_if_has_differentials(op.__name__)

        try:
            other_c = other.to_cartesian()
        except Exception:
            return NotImplemented

        first, second = ((self, other_c) if not reverse else
                         (other_c, self))
        return self.__class__(*(op(getattr(first, component),
                                   getattr(second, component))
                                for component in first.components))

    def norm(self):
        """Vector norm.

        The norm is the standard Frobenius norm, i.e., the square root of the
        sum of the squares of all components with non-angular units.

        Note that any associated differentials will be dropped during this
        operation.

        Returns
        -------
        norm : `astropy.units.Quantity`
            Vector norm, with the same shape as the representation.
        """
        # erfa pm: Modulus of p-vector.
        return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))

    def mean(self, *args, **kwargs):
        """Vector mean.

        Returns a new CartesianRepresentation instance with the means of the
        x, y, and z components.

        Refer to `~numpy.mean` for full documentation of the arguments, noting
        that ``axis`` is the entry in the ``shape`` of the representation, and
        that the ``out`` argument cannot be used.
        """
        self._raise_if_has_differentials('mean')
        return self._apply('mean', *args, **kwargs)

    def sum(self, *args, **kwargs):
        """Vector sum.

        Returns a new CartesianRepresentation instance with the sums of the
        x, y, and z components.

        Refer to `~numpy.sum` for full documentation of the arguments, noting
        that ``axis`` is the entry in the ``shape`` of the representation, and
        that the ``out`` argument cannot be used.
        """
        self._raise_if_has_differentials('sum')
        return self._apply('sum', *args, **kwargs)

    def dot(self, other):
        """Dot product of two representations.

        Note that any associated differentials will be dropped during this
        operation.

        Parameters
        ----------
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            If not already cartesian, it is converted.

        Returns
        -------
        dot_product : `~astropy.units.Quantity`
            The sum of the product of the x, y, and z components of ``self``
            and ``other``.
        """
        try:
            other_c = other.to_cartesian()
        except Exception as err:
            raise TypeError("can only take dot product with another "
                            "representation, not a {} instance."
                            .format(type(other))) from err
        # erfa pdp: p-vector inner (=scalar=dot) product.
        return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1),
                              other_c.get_xyz(xyz_axis=-1))

    def cross(self, other):
        """Cross product of two representations.

        Parameters
        ----------
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            If not already cartesian, it is converted.

        Returns
        -------
        cross_product : `~astropy.coordinates.CartesianRepresentation`
            With vectors perpendicular to both ``self`` and ``other``.
        """
        self._raise_if_has_differentials('cross')
        try:
            other_c = other.to_cartesian()
        except Exception as err:
            raise TypeError("can only take cross product with another "
                            "representation, not a {} instance."
                            .format(type(other))) from err
        # erfa pxp: p-vector outer (=vector=cross) product.
        sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1),
                             other_c.get_xyz(xyz_axis=-1))
        return self.__class__(sxo, xyz_axis=-1)


class UnitSphericalRepresentation(BaseRepresentation):
    """
    Representation of points on a unit sphere.
    Parameters
    ----------
    lon, lat : `~astropy.units.Quantity` ['angle'] or str
        The longitude and latitude of the point(s), in angular units. The
        latitude should be between -90 and 90 degrees, and the longitude will
        be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`,
        `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.

    differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.BaseDifferential` instance (see
        `._compatible_differentials` for valid types), or a dictionary of
        differential instances with keys set to a string representation of
        the SI unit with which the differential (derivative) is taken. For
        example, for a velocity differential on a positional representation,
        the key would be ``'s'`` for seconds, indicating that the derivative
        is a time derivative.

    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """

    attr_classes = {'lon': Longitude,
                    'lat': Latitude}

    @classproperty
    def _dimensional_representation(cls):
        return SphericalRepresentation

    def __init__(self, lon, lat=None, differentials=None, copy=True):
        super().__init__(lon, lat, differentials=differentials, copy=copy)

    @classproperty
    def _compatible_differentials(cls):
        return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
                SphericalDifferential, SphericalCosLatDifferential,
                RadialDifferential]

    # Could let the metaclass define these automatically, but good to have
    # a bit clearer docstrings.
    @property
    def lon(self):
        """
        The longitude of the point(s).
        """
        return self._lon

    @property
    def lat(self):
        """
        The latitude of the point(s).
        """
        return self._lat

    def unit_vectors(self):
        sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
        sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
        return {
            'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False),
            'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
                                           coslat, copy=False)}

    def scale_factors(self, omit_coslat=False):
        sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True)
        sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
        return {'lon': sf_lon,
                'lat': sf_lat}

    def to_cartesian(self):
        """
        Converts spherical polar coordinates to 3D rectangular cartesian
        coordinates.
        """
        # erfa s2c: Convert [unit]spherical coordinates to Cartesian.
        p = erfa_ufunc.s2c(self.lon, self.lat)
        return CartesianRepresentation(p, xyz_axis=-1, copy=False)

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to spherical polar
        coordinates.
        """
        p = cart.get_xyz(xyz_axis=-1)
        # erfa c2s: P-vector to [unit]spherical coordinates.
        return cls(*erfa_ufunc.c2s(p), copy=False)

    def represent_as(self, other_class, differential_class=None):
        # Take a short cut if the other class is a spherical representation
        # TODO! for differential_class. This cannot (currently) be implemented
        # like in the other Representations since `_re_represent_differentials`
        # keeps differentials' unit keys, but this can result in a mismatch
        # between the UnitSpherical expected key (e.g. "s") and that expected
        # in the other class (here "s / m").
        # For more info, see PR #11467
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(other_class, PhysicsSphericalRepresentation):
                return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
                                   r=1.0, copy=False)
            elif issubclass(other_class, SphericalRepresentation):
                return other_class(lon=self.lon, lat=self.lat, distance=1.0,
                                   copy=False)

        return super().represent_as(other_class, differential_class)

    def transform(self, matrix):
        r"""Transform the unit-spherical coordinates using a 3x3 matrix.

        This returns a new representation and does not modify the original one.
        Any differentials attached to this representation will also be
        transformed.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 matrix, such as a rotation matrix (or a stack of matrices).

        Returns
        -------
        `UnitSphericalRepresentation` or `SphericalRepresentation`
            If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a
            rotation, then the result is a `UnitSphericalRepresentation`.
            All other matrices will change the distance, so the dimensional
            representation is used instead.
        """
        # the transformation matrix does not need to be a rotation matrix,
        # so the unit-distance is not guaranteed. For speed, we check if the
        # matrix is in O(3) and preserves lengths.
        if np.all(is_O3(matrix)):  # remain in unit-rep
            xyz = erfa_ufunc.s2c(self.lon, self.lat)
            p = erfa_ufunc.rxp(matrix, xyz)
            lon, lat = erfa_ufunc.c2s(p)
            rep = self.__class__(lon=lon, lat=lat)
            # handle differentials
            new_diffs = dict((k, d.transform(matrix, self, rep))
                             for k, d in self.differentials.items())
            rep = rep.with_differentials(new_diffs)

        else:  # switch to dimensional representation
            rep = self._dimensional_representation(
                lon=self.lon, lat=self.lat, distance=1,
                differentials=self.differentials
            ).transform(matrix)

        return rep

    def _scale_operation(self, op, *args):
        return self._dimensional_representation(
            lon=self.lon, lat=self.lat, distance=1.,
            differentials=self.differentials)._scale_operation(op, *args)

    def __neg__(self):
        if any(differential.base_representation is not self.__class__
               for differential in self.differentials.values()):
            return super().__neg__()

        result = self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
        for key, differential in self.differentials.items():
            new_comps = (op(getattr(differential, comp))
                         for op, comp in zip((operator.pos, operator.neg),
                                             differential.components))
            result.differentials[key] = differential.__class__(*new_comps,
                                                               copy=False)
        return result

    def norm(self):
        """Vector norm.

        The norm is the standard Frobenius norm, i.e., the square root of the
        sum of the squares of all components with non-angular units, which is
        always unity for vectors on the unit sphere.

        Returns
        -------
        norm : `~astropy.units.Quantity` ['dimensionless']
            Dimensionless ones, with the same shape as the representation.
        """
        return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
                          copy=False)

    def _combine_operation(self, op, other, reverse=False):
        self._raise_if_has_differentials(op.__name__)

        result = self.to_cartesian()._combine_operation(op, other, reverse)
        if result is NotImplemented:
            return NotImplemented
        else:
            return self._dimensional_representation.from_cartesian(result)

    def mean(self, *args, **kwargs):
        """Vector mean.

        The representation is converted to cartesian, the means of the x, y,
        and z components are calculated, and the result is converted to a
        `~astropy.coordinates.SphericalRepresentation`.
        Refer to `~numpy.mean` for full documentation of the arguments, noting
        that ``axis`` is the entry in the ``shape`` of the representation, and
        that the ``out`` argument cannot be used.
        """
        self._raise_if_has_differentials('mean')
        return self._dimensional_representation.from_cartesian(
            self.to_cartesian().mean(*args, **kwargs))

    def sum(self, *args, **kwargs):
        """Vector sum.

        The representation is converted to cartesian, the sums of the x, y,
        and z components are calculated, and the result is converted to a
        `~astropy.coordinates.SphericalRepresentation`.

        Refer to `~numpy.sum` for full documentation of the arguments, noting
        that ``axis`` is the entry in the ``shape`` of the representation, and
        that the ``out`` argument cannot be used.
        """
        self._raise_if_has_differentials('sum')
        return self._dimensional_representation.from_cartesian(
            self.to_cartesian().sum(*args, **kwargs))

    def cross(self, other):
        """Cross product of two representations.

        The calculation is done by converting both ``self`` and ``other``
        to `~astropy.coordinates.CartesianRepresentation`, and converting the
        result back to `~astropy.coordinates.SphericalRepresentation`.

        Parameters
        ----------
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The representation to take the cross product with.

        Returns
        -------
        cross_product : `~astropy.coordinates.SphericalRepresentation`
            With vectors perpendicular to both ``self`` and ``other``.
        """
        self._raise_if_has_differentials('cross')
        return self._dimensional_representation.from_cartesian(
            self.to_cartesian().cross(other))


class RadialRepresentation(BaseRepresentation):
    """
    Representation of the distance of points from the origin.

    Note that this is mostly intended as an internal helper representation.
    It can do little else but be used as a scale in multiplication.

    Parameters
    ----------
    distance : `~astropy.units.Quantity` ['length']
        The distance of the point(s) from the origin.

    differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.BaseDifferential` instance (see
        `._compatible_differentials` for valid types), or a dictionary of
        differential instances with keys set to a string representation of
        the SI unit with which the differential (derivative) is taken. For
        example, for a velocity differential on a positional representation,
        the key would be ``'s'`` for seconds, indicating that the derivative
        is a time derivative.

    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """

    attr_classes = {'distance': u.Quantity}

    def __init__(self, distance, differentials=None, copy=True):
        super().__init__(distance, differentials=differentials, copy=copy)

    @property
    def distance(self):
        """
        The distance from the origin to the point(s).
        """
        return self._distance

    def unit_vectors(self):
        """Cartesian unit vectors are undefined for radial representation."""
        raise NotImplementedError('Cartesian unit vectors are undefined for '
                                  '{} instances'.format(self.__class__))

    def scale_factors(self):
        l = np.broadcast_to(1.*u.one, self.shape, subok=True)
        return {'distance': l}

    def to_cartesian(self):
        """Cannot convert radial representation to cartesian."""
        raise NotImplementedError('cannot convert {} instance to cartesian.'
                                  .format(self.__class__))

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to radial coordinate.
        """
        return cls(distance=cart.norm(), copy=False)

    def __mul__(self, other):
        if isinstance(other, BaseRepresentation):
            return self.distance * other
        else:
            return super().__mul__(other)

    def norm(self):
        """Vector norm.

        Just the distance itself.

        Returns
        -------
        norm : `~astropy.units.Quantity` ['length']
            The distance of the point(s) from the origin, with the same shape
            as the representation.
        """
        return self.distance

    def _combine_operation(self, op, other, reverse=False):
        return NotImplemented

    def transform(self, matrix):
        """Radial representations cannot be transformed by a Cartesian matrix.

        Parameters
        ----------
        matrix : array-like
            The transformation matrix in a Cartesian basis.
            Must be a multiplication: a diagonal matrix with identical
            elements.
            Must have shape (..., 3, 3), where the last 2 indices are for the
            matrix on each other axis.
            Make sure that the matrix shape is compatible with the shape of
            this representation.

        Raises
        ------
        ValueError
            If the matrix is not a multiplication.
        """
        scl = matrix[..., 0, 0]
        # check that the matrix is a scaled identity matrix on the last 2 axes.
        if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
            raise ValueError("Radial representations can only be "
                             "transformed by a scaled identity matrix")

        return self * scl


def _spherical_op_funcs(op, *args):
    """For given operator, return functions that adjust lon, lat, distance."""
    if op is operator.neg:
        return lambda x: x+180*u.deg, operator.neg, operator.pos

    try:
        scale_sign = np.sign(args[0])
    except Exception:
        # This should always work, even if perhaps we get a negative distance.
        return operator.pos, operator.pos, lambda x: op(x, *args)

    scale = abs(args[0])
    return (lambda x: x + 180*u.deg*np.signbit(scale_sign),
            lambda x: x * scale_sign,
            lambda x: op(x, scale))


class SphericalRepresentation(BaseRepresentation):
    """
    Representation of points in 3D spherical coordinates.

    Parameters
    ----------
    lon, lat : `~astropy.units.Quantity` ['angle']
        The longitude and latitude of the point(s), in angular units. The
        latitude should be between -90 and 90 degrees, and the longitude will
        be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`,
        `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.

    distance : `~astropy.units.Quantity` ['length']
        The distance to the point(s). If the distance is a length, it is
        passed to the :class:`~astropy.coordinates.Distance` class, otherwise
        it is passed to the :class:`~astropy.units.Quantity` class.

    differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.BaseDifferential` instance (see
        `._compatible_differentials` for valid types), or a dictionary of
        differential instances with keys set to a string representation of
        the SI unit with which the differential (derivative) is taken. For
        example, for a velocity differential on a positional representation,
        the key would be ``'s'`` for seconds, indicating that the derivative
        is a time derivative.

    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
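
    Examples
    --------
    A short sketch of a round trip through Cartesian coordinates (the values
    here are purely illustrative):

    >>> from astropy import units as u
    >>> from astropy.coordinates import (CartesianRepresentation,
    ...                                  SphericalRepresentation)
    >>> sph = SphericalRepresentation(lon=30 * u.deg, lat=0 * u.deg,
    ...                               distance=2 * u.kpc)
    >>> sph.represent_as(CartesianRepresentation).norm()  # doctest: +FLOAT_CMP
    <Quantity 2. kpc>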
""" attr_classes = {'lon': Longitude, 'lat': Latitude, 'distance': u.Quantity} _unit_representation = UnitSphericalRepresentation def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True): super().__init__(lon, lat, distance, copy=copy, differentials=differentials) if (not isinstance(self._distance, Distance) and self._distance.unit.physical_type == 'length'): try: self._distance = Distance(self._distance, copy=False) except ValueError as e: if e.args[0].startswith('distance must be >= 0'): raise ValueError("Distance must be >= 0. To allow negative " "distance values, you must explicitly pass" " in a `Distance` object with the the " "argument 'allow_negative=True'.") from e else: raise @classproperty def _compatible_differentials(cls): return [UnitSphericalDifferential, UnitSphericalCosLatDifferential, SphericalDifferential, SphericalCosLatDifferential, RadialDifferential] @property def lon(self): """ The longitude of the point(s). """ return self._lon @property def lat(self): """ The latitude of the point(s). """ return self._lat @property def distance(self): """ The distance from the origin to the point(s). """ return self._distance def unit_vectors(self): sinlon, coslon = np.sin(self.lon), np.cos(self.lon) sinlat, coslat = np.sin(self.lat), np.cos(self.lat) return { 'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False), 'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon, coslat, copy=False), 'distance': CartesianRepresentation(coslat*coslon, coslat*sinlon, sinlat, copy=False)} def scale_factors(self, omit_coslat=False): sf_lat = self.distance / u.radian sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat) sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'lon': sf_lon, 'lat': sf_lat, 'distance': sf_distance} def represent_as(self, other_class, differential_class=None): # Take a short cut if the other class is a spherical representation if inspect.isclass(other_class): if issubclass(other_class, PhysicsSphericalRepresentation): diffs = self._re_represent_differentials(other_class, differential_class) return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=self.distance, differentials=diffs, copy=False) elif issubclass(other_class, UnitSphericalRepresentation): diffs = self._re_represent_differentials(other_class, differential_class) return other_class(lon=self.lon, lat=self.lat, differentials=diffs, copy=False) return super().represent_as(other_class, differential_class) def to_cartesian(self): """ Converts spherical polar coordinates to 3D rectangular cartesian coordinates. """ # We need to convert Distance to Quantity to allow negative values. if isinstance(self.distance, Distance): d = self.distance.view(u.Quantity) else: d = self.distance # erfa s2p: Convert spherical polar coordinates to p-vector. p = erfa_ufunc.s2p(self.lon, self.lat, d) return CartesianRepresentation(p, xyz_axis=-1, copy=False) @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. """ p = cart.get_xyz(xyz_axis=-1) # erfa p2s: P-vector to spherical polar coordinates. return cls(*erfa_ufunc.p2s(p), copy=False) def transform(self, matrix): """Transform the spherical coordinates using a 3x3 matrix. This returns a new representation and does not modify the original one. Any differentials attached to this representation will also be transformed. Parameters ---------- matrix : (3,3) array-like A 3x3 matrix, such as a rotation matrix (or a stack of matrices). 
""" xyz = erfa_ufunc.s2c(self.lon, self.lat) p = erfa_ufunc.rxp(matrix, xyz) lon, lat, ur = erfa_ufunc.p2s(p) rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur) # handle differentials new_diffs = dict((k, d.transform(matrix, self, rep)) for k, d in self.differentials.items()) return rep.with_differentials(new_diffs) def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. For spherical coordinates, this is just the absolute value of the distance. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.abs(self.distance) def _scale_operation(self, op, *args): # TODO: expand special-casing to UnitSpherical and RadialDifferential. if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args) result = self.__class__(lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (operator.pos, lat_op, distance_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class PhysicsSphericalRepresentation(BaseRepresentation): """ Representation of points in 3D spherical coordinates (using the physics convention of using ``phi`` and ``theta`` for azimuth and inclination from the pole). Parameters ---------- phi, theta : `~astropy.units.Quantity` or str The azimuth and inclination of the point(s), in angular units. The inclination should be between 0 and 180 degrees, and the azimuth will be wrapped to an angle between 0 and 360 degrees. These can also be instances of `~astropy.coordinates.Angle`. If ``copy`` is False, `phi` will be changed inplace if it is not between 0 and 360 degrees. r : `~astropy.units.Quantity` The distance to the point(s). If the distance is a length, it is passed to the :class:`~astropy.coordinates.Distance` class, otherwise it is passed to the :class:`~astropy.units.Quantity` class. differentials : dict, `PhysicsSphericalDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `PhysicsSphericalDifferential` instance, or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'phi': Angle, 'theta': Angle, 'r': u.Quantity} def __init__(self, phi, theta=None, r=None, differentials=None, copy=True): super().__init__(phi, theta, r, copy=copy, differentials=differentials) # Wrap/validate phi/theta # Note that _phi already holds our own copy if copy=True. 
        self._phi.wrap_at(360 * u.deg, inplace=True)

        # This invalid catch block can be removed when the minimum numpy
        # version is >= 1.19 (NUMPY_LT_1_19)
        with np.errstate(invalid='ignore'):
            if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
                raise ValueError('Inclination angle(s) must be within '
                                 '0 deg <= angle <= 180 deg, '
                                 'got {}'.format(self._theta.to(u.degree)))

        if self._r.unit.physical_type == 'length':
            self._r = self._r.view(Distance)

    @property
    def phi(self):
        """
        The azimuth of the point(s).
        """
        return self._phi

    @property
    def theta(self):
        """
        The inclination of the point(s), measured from the pole.
        """
        return self._theta

    @property
    def r(self):
        """
        The distance from the origin to the point(s).
        """
        return self._r

    def unit_vectors(self):
        sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
        sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
        return {
            'phi': CartesianRepresentation(-sinphi, cosphi, 0., copy=False),
            'theta': CartesianRepresentation(costheta*cosphi,
                                             costheta*sinphi,
                                             -sintheta, copy=False),
            'r': CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
                                         costheta, copy=False)}

    def scale_factors(self):
        r = self.r / u.radian
        sintheta = np.sin(self.theta)
        l = np.broadcast_to(1.*u.one, self.shape, subok=True)
        return {'phi': r * sintheta,
                'theta': r,
                'r': l}

    def represent_as(self, other_class, differential_class=None):
        # Take a short cut if the other class is a spherical representation
        if inspect.isclass(other_class):
            if issubclass(other_class, SphericalRepresentation):
                diffs = self._re_represent_differentials(other_class,
                                                         differential_class)
                return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
                                   distance=self.r, differentials=diffs,
                                   copy=False)

            elif issubclass(other_class, UnitSphericalRepresentation):
                diffs = self._re_represent_differentials(other_class,
                                                         differential_class)
                return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
                                   differentials=diffs, copy=False)

        return super().represent_as(other_class, differential_class)

    def to_cartesian(self):
        """
        Converts spherical polar coordinates to 3D rectangular cartesian
        coordinates.
        """
        # We need to convert Distance to Quantity to allow negative values.
        if isinstance(self.r, Distance):
            d = self.r.view(u.Quantity)
        else:
            d = self.r

        x = d * np.sin(self.theta) * np.cos(self.phi)
        y = d * np.sin(self.theta) * np.sin(self.phi)
        z = d * np.cos(self.theta)

        return CartesianRepresentation(x=x, y=y, z=z, copy=False)

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to spherical polar
        coordinates.
        """
        s = np.hypot(cart.x, cart.y)
        r = np.hypot(s, cart.z)

        phi = np.arctan2(cart.y, cart.x)
        theta = np.arctan2(s, cart.z)

        return cls(phi=phi, theta=theta, r=r, copy=False)

    def transform(self, matrix):
        """Transform the spherical coordinates using a 3x3 matrix.

        This returns a new representation and does not modify the original one.
        Any differentials attached to this representation will also be
        transformed.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
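
        Examples
        --------
        A minimal sketch: a rotation about the z axis only shifts the
        azimuth ``phi``:

        >>> from astropy import units as u
        >>> from astropy.coordinates import PhysicsSphericalRepresentation
        >>> from astropy.coordinates.matrix_utilities import rotation_matrix
        >>> rep = PhysicsSphericalRepresentation(phi=60 * u.deg,
        ...                                      theta=90 * u.deg, r=1 * u.pc)
        >>> rep.transform(rotation_matrix(30 * u.deg, axis='z')).phi.to(u.deg)  # doctest: +FLOAT_CMP
        <Angle 30. deg>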
""" # apply transformation in unit-spherical coordinates xyz = erfa_ufunc.s2c(self.phi, 90*u.deg-self.theta) p = erfa_ufunc.rxp(matrix, xyz) lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r` # create transformed physics-spherical representation, # reapplying the distance scaling rep = self.__class__(phi=lon, theta=90*u.deg-lat, r=self.r * ur) new_diffs = dict((k, d.transform(matrix, self, rep)) for k, d in self.differentials.items()) return rep.with_differentials(new_diffs) def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. For spherical coordinates, this is just the absolute value of the radius. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.abs(self.r) def _scale_operation(self, op, *args): if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args) # Also run phi_op on theta to ensure theta remains between 0 and 180: # any time the scale is negative, we do -theta + 180 degrees. result = self.__class__(phi_op(self.phi), phi_op(adjust_theta_sign(self.theta)), r_op(self.r), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (operator.pos, adjust_theta_sign, r_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class CylindricalRepresentation(BaseRepresentation): """ Representation of points in 3D cylindrical coordinates. Parameters ---------- rho : `~astropy.units.Quantity` The distance from the z axis to the point(s). phi : `~astropy.units.Quantity` or str The azimuth of the point(s), in angular units, which will be wrapped to an angle between 0 and 360 degrees. This can also be instances of `~astropy.coordinates.Angle`, z : `~astropy.units.Quantity` The z coordinate(s) of the point(s) differentials : dict, `CylindricalDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `CylindricalDifferential` instance, or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'rho': u.Quantity, 'phi': Angle, 'z': u.Quantity} def __init__(self, rho, phi=None, z=None, differentials=None, copy=True): super().__init__(rho, phi, z, copy=copy, differentials=differentials) if not self._rho.unit.is_equivalent(self._z.unit): raise u.UnitsError("rho and z should have matching physical types") @property def rho(self): """ The distance of the point(s) from the z-axis. """ return self._rho @property def phi(self): """ The azimuth of the point(s). """ return self._phi @property def z(self): """ The height of the point(s). 
""" return self._z def unit_vectors(self): sinphi, cosphi = np.sin(self.phi), np.cos(self.phi) l = np.broadcast_to(1., self.shape) return { 'rho': CartesianRepresentation(cosphi, sinphi, 0, copy=False), 'phi': CartesianRepresentation(-sinphi, cosphi, 0, copy=False), 'z': CartesianRepresentation(0, 0, l, unit=u.one, copy=False)} def scale_factors(self): rho = self.rho / u.radian l = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'rho': l, 'phi': rho, 'z': l} @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to cylindrical polar coordinates. """ rho = np.hypot(cart.x, cart.y) phi = np.arctan2(cart.y, cart.x) z = cart.z return cls(rho=rho, phi=phi, z=z, copy=False) def to_cartesian(self): """ Converts cylindrical polar coordinates to 3D rectangular cartesian coordinates. """ x = self.rho * np.cos(self.phi) y = self.rho * np.sin(self.phi) z = self.z return CartesianRepresentation(x=x, y=y, z=z, copy=False) def _scale_operation(self, op, *args): if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) phi_op, _, rho_op = _spherical_op_funcs(op, *args) z_op = lambda x: op(x, *args) result = self.__class__(rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (rho_op, operator.pos, z_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class BaseDifferential(BaseRepresentationOrDifferential): r"""A base class representing differentials of representations. These represent differences or derivatives along each component. E.g., for physics spherical coordinates, these would be :math:`\delta r, \delta \theta, \delta \phi`. Parameters ---------- d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass The components of the 3D differentials. The names are the keys and the subclasses the values of the ``attr_classes`` attribute. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. Notes ----- All differential representation classes should subclass this base class, and define an ``base_representation`` attribute with the class of the regular `~astropy.coordinates.BaseRepresentation` for which differential coordinates are provided. This will set up a default ``attr_classes`` instance with names equal to the base component names prefixed by ``d_``, and all classes set to `~astropy.units.Quantity`, plus properties to access those, and a default ``__init__`` for initialization. """ def __init_subclass__(cls, **kwargs): """Set default ``attr_classes`` and component getters on a Differential. class BaseDifferential(BaseRepresentationOrDifferential): For these, the components are those of the base representation prefixed by 'd_', and the class is `~astropy.units.Quantity`. """ # Don't do anything for base helper classes. if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential', 'BaseSphericalCosLatDifferential'): return if not hasattr(cls, 'base_representation'): raise NotImplementedError('Differential representations must have a' '"base_representation" class attribute.') # If not defined explicitly, create attr_classes. 
if not hasattr(cls, 'attr_classes'): base_attr_classes = cls.base_representation.attr_classes cls.attr_classes = {'d_' + c: u.Quantity for c in base_attr_classes} repr_name = cls.get_name() if repr_name in DIFFERENTIAL_CLASSES: raise ValueError(f"Differential class {repr_name} already defined") DIFFERENTIAL_CLASSES[repr_name] = cls _invalidate_reprdiff_cls_hash() # If not defined explicitly, create properties for the components. for component in cls.attr_classes: if not hasattr(cls, component): setattr(cls, component, property(_make_getter(component), doc=f"Component '{component}' of the Differential.")) super().__init_subclass__(**kwargs) @classmethod def _check_base(cls, base): if cls not in base._compatible_differentials: raise TypeError(f"Differential class {cls} is not compatible with the " f"base (representation) class {base.__class__}") def _get_deriv_key(self, base): """Given a base (representation instance), determine the unit of the derivative by removing the representation unit from the component units of this differential. """ # This check is just a last resort so we don't return a strange unit key # from accidentally passing in the wrong base. self._check_base(base) for name in base.components: comp = getattr(base, name) d_comp = getattr(self, f'd_{name}', None) if d_comp is not None: d_unit = comp.unit / d_comp.unit # This is quite a bit faster than using to_system() or going # through Quantity() d_unit_si = d_unit.decompose(u.si.bases) d_unit_si._scale = 1 # remove the scale from the unit return str(d_unit_si) else: raise RuntimeError("Invalid representation-differential units! This" " likely happened because either the " "representation or the associated differential " "have non-standard units. Check that the input " "positional data have positional units, and the " "input velocity data have velocity units, or " "are both dimensionless.") @classmethod def _get_base_vectors(cls, base): """Get unit vectors and scale factors from base. Parameters ---------- base : instance of ``self.base_representation`` The points for which the unit vectors and scale factors should be retrieved. Returns ------- unit_vectors : dict of `CartesianRepresentation` In the directions of the coordinates of base. scale_factors : dict of `~astropy.units.Quantity` Scale factors for each of the coordinates Raises ------ TypeError : if the base is not of the correct type """ cls._check_base(base) return base.unit_vectors(), base.scale_factors() def to_cartesian(self, base): """Convert the differential to 3D rectangular cartesian coordinates. Parameters ---------- base : instance of ``self.base_representation`` The points for which the differentials are to be converted: each of the components is multiplied by its unit vectors and scale factors. Returns ------- `CartesianDifferential` This object, converted. """ base_e, base_sf = self._get_base_vectors(base) return functools.reduce( operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c] for d_c, c in zip(self.components, base.components))) @classmethod def from_cartesian(cls, other, base): """Convert the differential from 3D rectangular cartesian coordinates to the desired class. Parameters ---------- other The object to convert into this differential. base : `BaseRepresentation` The points for which the differentials are to be converted: each of the components is multiplied by its unit vectors and scale factors. Will be converted to ``cls.base_representation`` if needed. 
        Returns
        -------
        `BaseDifferential` subclass instance
            A new differential object that is this class' type.
        """
        base = base.represent_as(cls.base_representation)
        base_e, base_sf = cls._get_base_vectors(base)
        return cls(*(other.dot(e / base_sf[component])
                     for component, e in base_e.items()), copy=False)

    def represent_as(self, other_class, base):
        """Convert coordinates to another representation.

        If the instance is of the requested class, it is returned unmodified.
        By default, conversion is done via cartesian coordinates.

        Parameters
        ----------
        other_class : `~astropy.coordinates.BaseRepresentation` subclass
            The type of representation to turn the coordinates into.
        base : instance of ``self.base_representation``
            Base relative to which the differentials are defined.  If the
            other class is a differential representation, the base will be
            converted to its ``base_representation``.
        """
        if other_class is self.__class__:
            return self

        # The default is to convert via cartesian coordinates.
        self_cartesian = self.to_cartesian(base)
        if issubclass(other_class, BaseDifferential):
            return other_class.from_cartesian(self_cartesian, base)
        else:
            return other_class.from_cartesian(self_cartesian)

    @classmethod
    def from_representation(cls, representation, base):
        """Create a new instance of this representation from another one.

        Parameters
        ----------
        representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
        base : instance of ``cls.base_representation``
            The base relative to which the differentials will be defined. If
            the representation is a differential itself, the base will be
            converted to its ``base_representation`` to help convert it.
        """
        if isinstance(representation, BaseDifferential):
            cartesian = representation.to_cartesian(
                base.represent_as(representation.base_representation))
        else:
            cartesian = representation.to_cartesian()

        return cls.from_cartesian(cartesian, base)

    def transform(self, matrix, base, transformed_base):
        """Transform differential using a 3x3 matrix in a Cartesian basis.

        This returns a new differential and does not modify the original one.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 (or stack thereof) matrix, such as a rotation matrix.
        base : instance of ``cls.base_representation``
            Base relative to which the differentials are defined.  If the
            other class is a differential representation, the base will be
            converted to its ``base_representation``.
        transformed_base : instance of ``cls.base_representation``
            Base relative to which the transformed differentials are defined.
            If the other class is a differential representation, the base will
            be converted to its ``base_representation``.
        """
        # route transformation through Cartesian
        cdiff = self.represent_as(CartesianDifferential, base=base
                                  ).transform(matrix)
        # move back to original representation
        diff = cdiff.represent_as(self.__class__, transformed_base)
        return diff

    def _scale_operation(self, op, *args, scaled_base=False):
        """Scale all components.

        Parameters
        ----------
        op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.)
        *args
            Any arguments required for the operator (typically, what is to
            be multiplied with, divided by).
        scaled_base : bool, optional
            Whether the base was scaled the same way. This affects whether
            differential components should be scaled. For instance, a
            differential in longitude should not be scaled if its spherical
            base is scaled in radius.
""" scaled_attrs = [op(getattr(self, c), *args) for c in self.components] return self.__class__(*scaled_attrs, copy=False) def _combine_operation(self, op, other, reverse=False): """Combine two differentials, or a differential with a representation. If ``other`` is of the same differential type as ``self``, the components will simply be combined. If ``other`` is a representation, it will be used as a base for which to evaluate the differential, and the result is a new representation. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc. other : `~astropy.coordinates.BaseRepresentation` subclass instance The other differential or representation. reverse : bool Whether the operands should be reversed (e.g., as we got here via ``self.__rsub__`` because ``self`` is a subclass of ``other``). """ if isinstance(self, type(other)): first, second = (self, other) if not reverse else (other, self) return self.__class__(*[op(getattr(first, c), getattr(second, c)) for c in self.components]) else: try: self_cartesian = self.to_cartesian(other) except TypeError: return NotImplemented return other._combine_operation(op, self_cartesian, not reverse) def __sub__(self, other): # avoid "differential - representation". if isinstance(other, BaseRepresentation): return NotImplemented return super().__sub__(other) def norm(self, base=None): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. Parameters ---------- base : instance of ``self.base_representation`` Base relative to which the differentials are defined. This is required to calculate the physical size of the differential for all but Cartesian differentials or radial differentials. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ # RadialDifferential overrides this function, so there is no handling here if not isinstance(self, CartesianDifferential) and base is None: raise ValueError("`base` must be provided to calculate the norm of a" f" {type(self).__name__}") return self.to_cartesian(base).norm() class CartesianDifferential(BaseDifferential): """Differentials in of points in 3D cartesian coordinates. Parameters ---------- d_x, d_y, d_z : `~astropy.units.Quantity` or array The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``, and ``d_z`` have different shapes, they should be broadcastable. If not quantities, ``unit`` should be set. If only ``d_x`` is given, it is assumed that it contains an array with the 3 coordinates stored along ``xyz_axis``. unit : `~astropy.units.Unit` or str If given, the differentials will be converted to this unit (or taken to be in this unit if not given. xyz_axis : int, optional The axis along which the coordinates are stored when a single array is provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0). copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = CartesianRepresentation _d_xyz = None def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True): if d_y is None and d_z is None: if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV': # Short-cut for 3-D array input. d_x = u.Quantity(d_x, unit, copy=copy, subok=True) # Keep a link to the array with all three coordinates # so that we can return it quickly if needed in get_xyz. 
                self._d_xyz = d_x
                if xyz_axis:
                    d_x = np.moveaxis(d_x, xyz_axis, 0)
                    self._xyz_axis = xyz_axis
                else:
                    self._xyz_axis = 0

                self._d_x, self._d_y, self._d_z = d_x
                return

            else:
                d_x, d_y, d_z = d_x

        if xyz_axis is not None:
            raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
                             "are in a single array passed in through d_x, "
                             "i.e., d_y and d_z should not be given.")

        if d_y is None or d_z is None:
            raise ValueError("d_x, d_y, and d_z are required to instantiate {}"
                             .format(self.__class__.__name__))

        if unit is not None:
            d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
            d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
            d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
            copy = False

        super().__init__(d_x, d_y, d_z, copy=copy)
        if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
                self._d_x.unit.is_equivalent(self._d_z.unit)):
            raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')

    def to_cartesian(self, base=None):
        return CartesianRepresentation(*[getattr(self, c) for c
                                         in self.components])

    @classmethod
    def from_cartesian(cls, other, base=None):
        return cls(*[getattr(other, c) for c in other.components])

    def transform(self, matrix, base=None, transformed_base=None):
        """Transform differentials using a 3x3 matrix in a Cartesian basis.

        This returns a new differential and does not modify the original one.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 (or stack thereof) matrix, such as a rotation matrix.
        base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
            Not used in the Cartesian transformation.
        """
        # erfa rxp: Multiply a p-vector by an r-matrix.
        p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))

        return self.__class__(p, xyz_axis=-1, copy=False)

    def get_d_xyz(self, xyz_axis=0):
        """Return a vector array of the x, y, and z coordinates.

        Parameters
        ----------
        xyz_axis : int, optional
            The axis in the final array along which the x, y, z components
            should be stored (default: 0).

        Returns
        -------
        d_xyz : `~astropy.units.Quantity`
            With dimension 3 along ``xyz_axis``.  Note that, if possible,
            this will be a view.
        """
        if self._d_xyz is not None:
            if self._xyz_axis == xyz_axis:
                return self._d_xyz
            else:
                return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)

        # Create combined array.  TO DO: keep it in _d_xyz for repeated use?
        # But then in-place changes have to cancel it. Likely best to
        # also update components.
        return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)

    d_xyz = property(get_d_xyz)


class BaseSphericalDifferential(BaseDifferential):
    def _d_lon_coslat(self, base):
        """Convert longitude differential d_lon to d_lon_coslat.

        Parameters
        ----------
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        self._check_base(base)
        return self.d_lon * np.cos(base.lat)

    @classmethod
    def _get_d_lon(cls, d_lon_coslat, base):
        """Convert longitude differential d_lon_coslat to d_lon.

        Parameters
        ----------
        d_lon_coslat : `~astropy.units.Quantity`
            Longitude differential that includes ``cos(lat)``.
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        cls._check_base(base)
        return d_lon_coslat / np.cos(base.lat)

    def _combine_operation(self, op, other, reverse=False):
        """Combine two differentials, or a differential with a representation.

        If ``other`` is of the same differential type as ``self``, the
        components will simply be combined.
        If both are different parts of
        a `~astropy.coordinates.SphericalDifferential` (e.g., a
        `~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.  If ``other`` is a representation, it will be used as a
        base for which to evaluate the differential, and the result is a new
        representation.

        Parameters
        ----------
        op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The other differential or representation.
        reverse : bool
            Whether the operands should be reversed (e.g., as we got here via
            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
        """
        if (isinstance(other, BaseSphericalDifferential) and
                not isinstance(self, type(other)) or
                isinstance(other, RadialDifferential)):
            all_components = set(self.components) | set(other.components)
            first, second = (self, other) if not reverse else (other, self)
            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
                           for c in all_components}
            return SphericalDifferential(**result_args)

        return super()._combine_operation(op, other, reverse)


class UnitSphericalDifferential(BaseSphericalDifferential):
    """Differential(s) of points on a unit sphere.

    Parameters
    ----------
    d_lon, d_lat : `~astropy.units.Quantity`
        The longitude and latitude of the differentials.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = UnitSphericalRepresentation

    @classproperty
    def _dimensional_differential(cls):
        return SphericalDifferential

    def __init__(self, d_lon, d_lat=None, copy=True):
        super().__init__(d_lon, d_lat, copy=copy)
        if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
            raise u.UnitsError('d_lon and d_lat should have equivalent units.')

    @classmethod
    def from_cartesian(cls, other, base):
        # Go via the dimensional equivalent, so that the longitude and latitude
        # differentials correctly take into account the norm of the base.
        dimensional = cls._dimensional_differential.from_cartesian(other, base)
        return dimensional.represent_as(cls)

    def to_cartesian(self, base):
        if isinstance(base, SphericalRepresentation):
            scale = base.distance
        elif isinstance(base, PhysicsSphericalRepresentation):
            scale = base.r
        else:
            return super().to_cartesian(base)

        base = base.represent_as(UnitSphericalRepresentation)
        return scale * super().to_cartesian(base)

    def represent_as(self, other_class, base=None):
        # Only have enough information to represent other unit-spherical.
        if issubclass(other_class, UnitSphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # All spherical differentials can be done without going to Cartesian,
        # though CosLat needs base for the latitude.
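        # The conversions below use d_lon = d_lon_coslat / cos(lat), and, for
        # the physics convention, d_lon = d_phi and d_lat = -d_theta (since
        # lat = 90 deg - theta).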
if isinstance(representation, SphericalDifferential): return cls(representation.d_lon, representation.d_lat) elif isinstance(representation, (SphericalCosLatDifferential, UnitSphericalCosLatDifferential)): d_lon = cls._get_d_lon(representation.d_lon_coslat, base) return cls(d_lon, representation.d_lat) elif isinstance(representation, PhysicsSphericalDifferential): return cls(representation.d_phi, -representation.d_theta) return super().from_representation(representation, base) def transform(self, matrix, base, transformed_base): """Transform differential using a 3x3 matrix in a Cartesian basis. This returns a new differential and does not modify the original one. Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. base : instance of ``cls.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. transformed_base : instance of ``cls.base_representation`` Base relative to which the transformed differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ # the transformation matrix does not need to be a rotation matrix, # so the unit-distance is not guaranteed. For speed, we check if the # matrix is in O(3) and preserves lengths. if np.all(is_O3(matrix)): # remain in unit-rep # TODO! implement without Cartesian intermediate step. # some of this can be moved to the parent class. diff = super().transform(matrix, base, transformed_base) else: # switch to dimensional representation du = self.d_lon.unit / base.lon.unit # derivative unit diff = self._dimensional_differential( d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du ).transform(matrix, base, transformed_base) return diff def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.copy() else: return super()._scale_operation(op, *args) class SphericalDifferential(BaseSphericalDifferential): """Differential(s) of points in 3D spherical coordinates. Parameters ---------- d_lon, d_lat : `~astropy.units.Quantity` The differential longitude and latitude. d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = SphericalRepresentation _unit_differential = UnitSphericalDifferential def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True): super().__init__(d_lon, d_lat, d_distance, copy=copy) if not self._d_lon.unit.is_equivalent(self._d_lat.unit): raise u.UnitsError('d_lon and d_lat should have equivalent units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though CosLat needs base for the latitude. 
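        # Note the sign flip for the physics convention below: theta is
        # measured from the pole rather than from the equator, so
        # d_theta = -d_lat.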
        if issubclass(other_class, UnitSphericalDifferential):
            return other_class(self.d_lon, self.d_lat)
        elif issubclass(other_class, RadialDifferential):
            return other_class(self.d_distance)
        elif issubclass(other_class, SphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat,
                               self.d_distance)
        elif issubclass(other_class, UnitSphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat)
        elif issubclass(other_class, PhysicsSphericalDifferential):
            return other_class(self.d_lon, -self.d_lat, self.d_distance)
        else:
            return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # Other spherical differentials can be done without going to Cartesian,
        # though CosLat needs base for the latitude.
        if isinstance(representation, SphericalCosLatDifferential):
            d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
            return cls(d_lon, representation.d_lat, representation.d_distance)
        elif isinstance(representation, PhysicsSphericalDifferential):
            return cls(representation.d_phi, -representation.d_theta,
                       representation.d_r)

        return super().from_representation(representation, base)

    def _scale_operation(self, op, *args, scaled_base=False):
        if scaled_base:
            return self.__class__(self.d_lon, self.d_lat,
                                  op(self.d_distance, *args))
        else:
            return super()._scale_operation(op, *args)


class BaseSphericalCosLatDifferential(BaseDifferential):
    """Differentials from points on a spherical base representation.

    With cos(lat) assumed to be included in the longitude differential.
    """
    @classmethod
    def _get_base_vectors(cls, base):
        """Get unit vectors and scale factors from (unit)spherical base.

        Parameters
        ----------
        base : instance of ``self.base_representation``
            The points for which the unit vectors and scale factors should be
            retrieved.

        Returns
        -------
        unit_vectors : dict of `CartesianRepresentation`
            In the directions of the coordinates of base.
        scale_factors : dict of `~astropy.units.Quantity`
            Scale factors for each of the coordinates.  The scale factor for
            longitude does not include the cos(lat) factor.

        Raises
        ------
        TypeError : if the base is not of the correct type
        """
        cls._check_base(base)
        return base.unit_vectors(), base.scale_factors(omit_coslat=True)

    def _d_lon(self, base):
        """Convert longitude differential with cos(lat) to one without.

        Parameters
        ----------
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        self._check_base(base)
        return self.d_lon_coslat / np.cos(base.lat)

    @classmethod
    def _get_d_lon_coslat(cls, d_lon, base):
        """Convert longitude differential d_lon to d_lon_coslat.

        Parameters
        ----------
        d_lon : `~astropy.units.Quantity`
            Value of the longitude differential without ``cos(lat)``.
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        cls._check_base(base)
        return d_lon * np.cos(base.lat)

    def _combine_operation(self, op, other, reverse=False):
        """Combine two differentials, or a differential with a representation.

        If ``other`` is of the same differential type as ``self``, the
        components will simply be combined.  If both are different parts of
        a `~astropy.coordinates.SphericalDifferential` (e.g., a
        `~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.  If ``other`` is a representation, it will be used as a
        base for which to evaluate the differential, and the result is a new
        representation.
        Parameters
        ----------
        op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The other differential or representation.
        reverse : bool
            Whether the operands should be reversed (e.g., as we got here via
            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
        """
        if (isinstance(other, BaseSphericalCosLatDifferential) and
                not isinstance(self, type(other)) or
                isinstance(other, RadialDifferential)):
            all_components = set(self.components) | set(other.components)
            first, second = (self, other) if not reverse else (other, self)
            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
                           for c in all_components}
            return SphericalCosLatDifferential(**result_args)

        return super()._combine_operation(op, other, reverse)


class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
    """Differential(s) of points on a unit sphere.

    Parameters
    ----------
    d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The longitude and latitude of the differentials.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = UnitSphericalRepresentation
    attr_classes = {'d_lon_coslat': u.Quantity,
                    'd_lat': u.Quantity}

    @classproperty
    def _dimensional_differential(cls):
        return SphericalCosLatDifferential

    def __init__(self, d_lon_coslat, d_lat=None, copy=True):
        super().__init__(d_lon_coslat, d_lat, copy=copy)
        if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
            raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
                               'units.')

    @classmethod
    def from_cartesian(cls, other, base):
        # Go via the dimensional equivalent, so that the longitude and latitude
        # differentials correctly take into account the norm of the base.
        dimensional = cls._dimensional_differential.from_cartesian(other, base)
        return dimensional.represent_as(cls)

    def to_cartesian(self, base):
        if isinstance(base, SphericalRepresentation):
            scale = base.distance
        elif isinstance(base, PhysicsSphericalRepresentation):
            scale = base.r
        else:
            return super().to_cartesian(base)

        base = base.represent_as(UnitSphericalRepresentation)
        return scale * super().to_cartesian(base)

    def represent_as(self, other_class, base=None):
        # Only have enough information to represent other unit-spherical.
        if issubclass(other_class, UnitSphericalDifferential):
            return other_class(self._d_lon(base), self.d_lat)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # All spherical differentials can be done without going to Cartesian,
        # though those without CosLat need base for the latitude.
        if isinstance(representation, SphericalCosLatDifferential):
            return cls(representation.d_lon_coslat, representation.d_lat)
        elif isinstance(representation,
                        (SphericalDifferential, UnitSphericalDifferential)):
            d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
            return cls(d_lon_coslat, representation.d_lat)
        elif isinstance(representation, PhysicsSphericalDifferential):
            d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
            return cls(d_lon_coslat, -representation.d_theta)

        return super().from_representation(representation, base)

    def transform(self, matrix, base, transformed_base):
        """Transform differential using a 3x3 matrix in a Cartesian basis.

        This returns a new differential and does not modify the original one.
Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. base : instance of ``cls.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. transformed_base : instance of ``cls.base_representation`` Base relative to which the transformed differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ # the transformation matrix does not need to be a rotation matrix, # so the unit-distance is not guaranteed. For speed, we check if the # matrix is in O(3) and preserves lengths. if np.all(is_O3(matrix)): # remain in unit-rep # TODO! implement without Cartesian intermediate step. diff = super().transform(matrix, base, transformed_base) else: # switch to dimensional representation du = self.d_lat.unit / base.lat.unit # derivative unit diff = self._dimensional_differential( d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du ).transform(matrix, base, transformed_base) return diff def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.copy() else: return super()._scale_operation(op, *args) class SphericalCosLatDifferential(BaseSphericalCosLatDifferential): """Differential(s) of points in 3D spherical coordinates. Parameters ---------- d_lon_coslat, d_lat : `~astropy.units.Quantity` The differential longitude (with cos(lat) included) and latitude. d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = SphericalRepresentation _unit_differential = UnitSphericalCosLatDifferential attr_classes = {'d_lon_coslat': u.Quantity, 'd_lat': u.Quantity, 'd_distance': u.Quantity} def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True): super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy) if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit): raise u.UnitsError('d_lon_coslat and d_lat should have equivalent ' 'units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though some need base for the latitude to remove cos(lat). if issubclass(other_class, UnitSphericalCosLatDifferential): return other_class(self.d_lon_coslat, self.d_lat) elif issubclass(other_class, RadialDifferential): return other_class(self.d_distance) elif issubclass(other_class, SphericalDifferential): return other_class(self._d_lon(base), self.d_lat, self.d_distance) elif issubclass(other_class, UnitSphericalDifferential): return other_class(self._d_lon(base), self.d_lat) elif issubclass(other_class, PhysicsSphericalDifferential): return other_class(self._d_lon(base), -self.d_lat, self.d_distance) return super().represent_as(other_class, base) @classmethod def from_representation(cls, representation, base=None): # Other spherical differentials can be done without going to Cartesian, # though we need base for the latitude to remove coslat. 
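        # The conversions below use d_lon_coslat = d_lon * cos(lat); for the
        # physics convention, d_phi plays the role of d_lon and
        # d_lat = -d_theta.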
if isinstance(representation, SphericalDifferential): d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base) return cls(d_lon_coslat, representation.d_lat, representation.d_distance) elif isinstance(representation, PhysicsSphericalDifferential): d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base) return cls(d_lon_coslat, -representation.d_theta, representation.d_r) return super().from_representation(representation, base) def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.__class__(self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)) else: return super()._scale_operation(op, *args) class RadialDifferential(BaseDifferential): """Differential(s) of radial distances. Parameters ---------- d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = RadialRepresentation def to_cartesian(self, base): return self.d_distance * base.represent_as( UnitSphericalRepresentation).to_cartesian() def norm(self, base=None): return self.d_distance @classmethod def from_cartesian(cls, other, base): return cls(other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False) @classmethod def from_representation(cls, representation, base=None): if isinstance(representation, (SphericalDifferential, SphericalCosLatDifferential)): return cls(representation.d_distance) elif isinstance(representation, PhysicsSphericalDifferential): return cls(representation.d_r) else: return super().from_representation(representation, base) def _combine_operation(self, op, other, reverse=False): if isinstance(other, self.base_representation): if reverse: first, second = other.distance, self.d_distance else: first, second = self.d_distance, other.distance return other.__class__(op(first, second), copy=False) elif isinstance(other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)): all_components = set(self.components) | set(other.components) first, second = (self, other) if not reverse else (other, self) result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.)) for c in all_components} return SphericalDifferential(**result_args) else: return super()._combine_operation(op, other, reverse) class PhysicsSphericalDifferential(BaseDifferential): """Differential(s) of 3D spherical coordinates using physics convention. Parameters ---------- d_phi, d_theta : `~astropy.units.Quantity` The differential azimuth and inclination. d_r : `~astropy.units.Quantity` The differential radial distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = PhysicsSphericalRepresentation def __init__(self, d_phi, d_theta=None, d_r=None, copy=True): super().__init__(d_phi, d_theta, d_r, copy=copy) if not self._d_phi.unit.is_equivalent(self._d_theta.unit): raise u.UnitsError('d_phi and d_theta should have equivalent ' 'units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though CosLat needs base for the latitude. For those, explicitly # do the equivalent of self._d_lon_coslat in SphericalDifferential. 
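        # Since lat = 90 deg - theta, cos(lat) = sin(theta); this is why the
        # CosLat branches below multiply d_phi by sin(theta).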
        if issubclass(other_class, SphericalDifferential):
            return other_class(self.d_phi, -self.d_theta, self.d_r)
        elif issubclass(other_class, UnitSphericalDifferential):
            return other_class(self.d_phi, -self.d_theta)
        elif issubclass(other_class, SphericalCosLatDifferential):
            self._check_base(base)
            d_lon_coslat = self.d_phi * np.sin(base.theta)
            return other_class(d_lon_coslat, -self.d_theta, self.d_r)
        elif issubclass(other_class, UnitSphericalCosLatDifferential):
            self._check_base(base)
            d_lon_coslat = self.d_phi * np.sin(base.theta)
            return other_class(d_lon_coslat, -self.d_theta)
        elif issubclass(other_class, RadialDifferential):
            return other_class(self.d_r)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # Other spherical differentials can be done without going to Cartesian,
        # though we need base for the latitude to remove coslat. For that case,
        # do the equivalent of cls._d_lon in SphericalDifferential.
        if isinstance(representation, SphericalDifferential):
            return cls(representation.d_lon, -representation.d_lat,
                       representation.d_distance)
        elif isinstance(representation, SphericalCosLatDifferential):
            cls._check_base(base)
            d_phi = representation.d_lon_coslat / np.sin(base.theta)
            return cls(d_phi, -representation.d_lat,
                       representation.d_distance)

        return super().from_representation(representation, base)

    def _scale_operation(self, op, *args, scaled_base=False):
        if scaled_base:
            return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
        else:
            return super()._scale_operation(op, *args)


class CylindricalDifferential(BaseDifferential):
    """Differential(s) of points in cylindrical coordinates.

    Parameters
    ----------
    d_rho : `~astropy.units.Quantity` ['speed']
        The differential cylindrical radius.
    d_phi : `~astropy.units.Quantity` ['angular speed']
        The differential azimuth.
    d_z : `~astropy.units.Quantity` ['speed']
        The differential height.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = CylindricalRepresentation

    def __init__(self, d_rho, d_phi=None, d_z=None, copy=True):
        super().__init__(d_rho, d_phi, d_z, copy=copy)
        if not self._d_rho.unit.is_equivalent(self._d_z.unit):
            raise u.UnitsError("d_rho and d_z should have equivalent units.")
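

# A minimal usage sketch (illustrative only; the numbers are arbitrary): a
# Cartesian velocity attached to a position becomes angular and radial rates
# when the pair is re-represented in spherical form.
if __name__ == '__main__':
    pos = CartesianRepresentation([1., 0., 0.] * u.kpc)
    vel = CartesianDifferential([0., 100., 0.] * u.km / u.s)
    sph = pos.with_differentials(vel).represent_as(SphericalRepresentation,
                                                   SphericalDifferential)
    # For this geometry the velocity is purely tangential, so it appears
    # entirely in d_lon; d_lat and d_distance are zero.
    print(sph.differentials['s'])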
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This module contains utility functions for working with angles. These are both
used internally in astropy.coordinates.angles, and of possible use externally.
"""

__all__ = ['angular_separation', 'position_angle', 'offset_by',
           'golden_spiral_grid', 'uniform_spherical_random_surface',
           'uniform_spherical_random_volume']

# Third-party
import numpy as np

# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
    UnitSphericalRepresentation, SphericalRepresentation)


def angular_separation(lon1, lat1, lon2, lat2):
    """
    Angular separation between two points on a sphere.

    Parameters
    ----------
    lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the two points.  Quantities should be in
        angular units; floats in radians.

    Returns
    -------
    angular separation : `~astropy.units.Quantity` ['angle'] or float
        Type depends on input; ``Quantity`` in angular units, or float in
        radians.

    Notes
    -----
    The angular separation is calculated using the Vincenty formula [1]_,
    which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
    poles and antipodes.

    .. [1] https://en.wikipedia.org/wiki/Great-circle_distance
    """
    sdlon = np.sin(lon2 - lon1)
    cdlon = np.cos(lon2 - lon1)
    slat1 = np.sin(lat1)
    slat2 = np.sin(lat2)
    clat1 = np.cos(lat1)
    clat2 = np.cos(lat2)

    num1 = clat2 * sdlon
    num2 = clat1 * slat2 - slat1 * clat2 * cdlon
    denominator = slat1 * slat2 + clat1 * clat2 * cdlon

    return np.arctan2(np.hypot(num1, num2), denominator)


def position_angle(lon1, lat1, lon2, lat2):
    """
    Position Angle (East of North) between two points on a sphere.

    Parameters
    ----------
    lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the two points.  Quantities should be in
        angular units; floats in radians.

    Returns
    -------
    pa : `~astropy.coordinates.Angle`
        The (positive) position angle of the vector pointing from position 1
        to position 2.  If any of the angles are arrays, this will contain an
        array following the appropriate `numpy` broadcasting rules.
    """
    from .angles import Angle

    deltalon = lon2 - lon1
    colat = np.cos(lat2)

    x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
    y = np.sin(deltalon) * colat

    return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)


def offset_by(lon, lat, posang, distance):
    """
    Point with the given offset from the given point.

    Parameters
    ----------
    lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the starting point, position angle and
        distance to the final point.  Quantities should be in angular units;
        floats in radians.  Polar points at lat = +/-90 are treated as the
        limit of +/-(90-epsilon) at the same lon.

    Returns
    -------
    lon, lat : `~astropy.coordinates.Angle`
        The position of the final point.  If any of the angles are arrays,
        these will contain arrays following the appropriate `numpy`
        broadcasting rules.  0 <= lon < 2pi.
""" from .angles import Angle # Calculations are done using the spherical trigonometry sine and cosine rules # of the triangle A at North Pole, B at starting point, C at final point # with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang) # with sides a (distance), b (final co-latitude), c (starting colatitude) # B, a, c are knowns; A and b are unknowns # https://en.wikipedia.org/wiki/Spherical_trigonometry cos_a = np.cos(distance) sin_a = np.sin(distance) cos_c = np.sin(lat) sin_c = np.cos(lat) cos_B = np.cos(posang) sin_B = np.sin(posang) # cosine rule: Know two sides: a,c and included angle: B; get unknown side b cos_b = cos_c * cos_a + sin_c * sin_a * cos_B # sin_b = np.sqrt(1 - cos_b**2) # sine rule and cosine rule for A (using both lets arctan2 pick quadrant). # multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors # at poles. Correct for the x=0 multiplication a few lines down. # sin_A/sin_a == sin_B/sin_b # Sine rule xsin_A = sin_a * sin_B * sin_c # cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule xcos_A = cos_a - cos_b * cos_c A = Angle(np.arctan2(xsin_A, xcos_A), u.radian) # Treat the poles as if they are infinitesimally far from pole but at given lon small_sin_c = sin_c < 1e-12 if small_sin_c.any(): # For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad) if A.shape: # broadcast to ensure the shape is like that of A, which is also # affected by the (possible) shapes of lat, posang, and distance. small_sin_c = np.broadcast_to(small_sin_c, A.shape) A[small_sin_c] = A_pole[small_sin_c] else: A = A_pole outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg) outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg) return outlon, outlat def golden_spiral_grid(size): """Generate a grid of points on the surface of the unit sphere using the Fibonacci or Golden Spiral method. .. seealso:: `Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_ Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The grid of points. """ golden_r = (1 + 5**0.5) / 2 grid = np.arange(0, size, dtype=float) + 0.5 lon = 2*np.pi / golden_r * grid * u.rad lat = np.arcsin(1 - 2 * grid / size) * u.rad return UnitSphericalRepresentation(lon, lat) def uniform_spherical_random_surface(size=1): """Generate a random sampling of points on the surface of the unit sphere. Parameters ---------- size : int The number of points to generate. Returns ------- rep : `~astropy.coordinates.UnitSphericalRepresentation` The random points. """ rng = np.random # can maybe switch to this being an input later - see #11628 lon = rng.uniform(0, 2*np.pi, size) * u.rad lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad return UnitSphericalRepresentation(lon, lat) def uniform_spherical_random_volume(size=1, max_radius=1): """Generate a random sampling of points that follow a uniform volume density distribution within a sphere. Parameters ---------- size : int The number of points to generate. max_radius : number, quantity-like, optional A dimensionless or unit-ful factor to scale the random distances. Returns ------- rep : `~astropy.coordinates.SphericalRepresentation` The random points. 
""" rng = np.random # can maybe switch to this being an input later - see #11628 usph = uniform_spherical_random_surface(size=size) r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False) return SphericalRepresentation(usph.lon, usph.lat, r) # # below here can be deleted in v5.0 from astropy.utils.decorators import deprecated from astropy.coordinates import angle_formats __old_angle_utilities_funcs = ['check_hms_ranges', 'degrees_to_dms', 'degrees_to_string', 'dms_to_degrees', 'format_exception', 'hms_to_degrees', 'hms_to_dms', 'hms_to_hours', 'hms_to_radians', 'hours_to_decimal', 'hours_to_hms', 'hours_to_radians', 'hours_to_string', 'parse_angle', 'radians_to_degrees', 'radians_to_dms', 'radians_to_hms', 'radians_to_hours', 'sexagesimal_to_string'] for funcname in __old_angle_utilities_funcs: vars()[funcname] = deprecated(name='astropy.coordinates.angle_utilities.' + funcname, alternative='astropy.coordinates.angle_formats.' + funcname, since='v4.3')(getattr(angle_formats, funcname))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.

The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.

This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""

import heapq
import inspect
import subprocess
from warnings import warn

from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import suppress, contextmanager
from inspect import signature

import numpy as np

from astropy import units as u
from astropy.utils.exceptions import AstropyWarning

from .matrix_utilities import matrix_product

__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
           'BaseAffineTransform', 'AffineTransform',
           'StaticMatrixTransform', 'DynamicMatrixTransform',
           'FunctionTransformWithFiniteDifference', 'CompositeTransform']


def frame_attrs_from_set(frame_set):
    """
    A `dict` of all the attributes of all frame classes in this
    `TransformGraph`.

    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    result = {}

    for frame_cls in frame_set:
        result.update(frame_cls.frame_attributes)

    return result


def frame_comps_from_set(frame_set):
    """
    A `set` of all component names ever defined within any frame class in
    this `TransformGraph`.

    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    result = set()

    for frame_cls in frame_set:
        rep_info = frame_cls._frame_specific_representation_info
        for mappings in rep_info.values():
            for rep_map in mappings:
                result.update([rep_map.framename])

    return result


class TransformGraph:
    """
    A graph representing the paths between coordinate frames.
    """

    def __init__(self):
        self._graph = defaultdict(dict)
        self.invalidate_cache()  # generates cache entries

    @property
    def _cached_names(self):
        if self._cached_names_dct is None:
            self._cached_names_dct = dct = {}
            for c in self.frame_set:
                nm = getattr(c, 'name', None)
                if nm is not None:
                    if not isinstance(nm, list):
                        nm = [nm]
                    for name in nm:
                        dct[name] = c

        return self._cached_names_dct

    @property
    def frame_set(self):
        """
        A `set` of all the frame classes present in this `TransformGraph`.
        """
        if self._cached_frame_set is None:
            self._cached_frame_set = set()
            for a in self._graph:
                self._cached_frame_set.add(a)
                for b in self._graph[a]:
                    self._cached_frame_set.add(b)

        return self._cached_frame_set.copy()

    @property
    def frame_attributes(self):
        """
        A `dict` of all the attributes of all frame classes in this
        `TransformGraph`.
        """
        if self._cached_frame_attributes is None:
            self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)

        return self._cached_frame_attributes

    @property
    def frame_component_names(self):
        """
        A `set` of all component names ever defined within any frame class in
        this `TransformGraph`.
""" if self._cached_component_names is None: self._cached_component_names = frame_comps_from_set(self.frame_set) return self._cached_component_names def invalidate_cache(self): """ Invalidates the cache that stores optimizations for traversing the transform graph. This is called automatically when transforms are added or removed, but will need to be called manually if weights on transforms are modified inplace. """ self._cached_names_dct = None self._cached_frame_set = None self._cached_frame_attributes = None self._cached_component_names = None self._shortestpaths = {} self._composite_cache = {} def add_transform(self, fromsys, tosys, transform): """ Add a new coordinate transformation to the graph. Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. transform : `CoordinateTransform` The transformation object. Typically a `CoordinateTransform` object, although it may be some other callable that is called with the same signature. Raises ------ TypeError If ``fromsys`` or ``tosys`` are not classes or ``transform`` is not callable. """ if not inspect.isclass(fromsys): raise TypeError('fromsys must be a class') if not inspect.isclass(tosys): raise TypeError('tosys must be a class') if not callable(transform): raise TypeError('transform must be callable') frame_set = self.frame_set.copy() frame_set.add(fromsys) frame_set.add(tosys) # Now we check to see if any attributes on the proposed frames override # *any* component names, which we can't allow for some of the logic in # the SkyCoord initializer to work attrs = set(frame_attrs_from_set(frame_set).keys()) comps = frame_comps_from_set(frame_set) invalid_attrs = attrs.intersection(comps) if invalid_attrs: invalid_frames = set() for attr in invalid_attrs: if attr in fromsys.frame_attributes: invalid_frames.update([fromsys]) if attr in tosys.frame_attributes: invalid_frames.update([tosys]) raise ValueError("Frame(s) {} contain invalid attribute names: {}" "\nFrame attributes can not conflict with *any* of" " the frame data component names (see" " `frame_transform_graph.frame_component_names`)." .format(list(invalid_frames), invalid_attrs)) self._graph[fromsys][tosys] = transform self.invalidate_cache() def remove_transform(self, fromsys, tosys, transform): """ Removes a coordinate transform from the graph. Parameters ---------- fromsys : class or None The coordinate frame *class* to start from. If `None`, ``transform`` will be searched for and removed (``tosys`` must also be `None`). tosys : class or None The coordinate frame *class* to transform into. If `None`, ``transform`` will be searched for and removed (``fromsys`` must also be `None`). transform : callable or None The transformation object to be removed or `None`. If `None` and ``tosys`` and ``fromsys`` are supplied, there will be no check to ensure the correct object is removed. 
""" if fromsys is None or tosys is None: if not (tosys is None and fromsys is None): raise ValueError('fromsys and tosys must both be None if either are') if transform is None: raise ValueError('cannot give all Nones to remove_transform') # search for the requested transform by brute force and remove it for a in self._graph: agraph = self._graph[a] for b in agraph: if agraph[b] is transform: del agraph[b] fromsys = a break # If the transform was found, need to break out of the outer for loop too if fromsys: break else: raise ValueError(f'Could not find transform {transform} in the graph') else: if transform is None: self._graph[fromsys].pop(tosys, None) else: curr = self._graph[fromsys].get(tosys, None) if curr is transform: self._graph[fromsys].pop(tosys) else: raise ValueError('Current transform from {} to {} is not ' '{}'.format(fromsys, tosys, transform)) # Remove the subgraph if it is now empty if self._graph[fromsys] == {}: self._graph.pop(fromsys) self.invalidate_cache() def find_shortest_path(self, fromsys, tosys): """ Computes the shortest distance along the transform graph from one system to another. Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. Returns ------- path : list of class or None The path from ``fromsys`` to ``tosys`` as an in-order sequence of classes. This list includes *both* ``fromsys`` and ``tosys``. Is `None` if there is no possible path. distance : float or int The total distance/priority from ``fromsys`` to ``tosys``. If priorities are not set this is the number of transforms needed. Is ``inf`` if there is no possible path. """ inf = float('inf') # special-case the 0 or 1-path if tosys is fromsys: if tosys not in self._graph[fromsys]: # Means there's no transform necessary to go from it to itself. return [tosys], 0 if tosys in self._graph[fromsys]: # this will also catch the case where tosys is fromsys, but has # a defined transform. t = self._graph[fromsys][tosys] return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1) # otherwise, need to construct the path: if fromsys in self._shortestpaths: # already have a cached result fpaths = self._shortestpaths[fromsys] if tosys in fpaths: return fpaths[tosys] else: return None, inf # use Dijkstra's algorithm to find shortest path in all other cases nodes = [] # first make the list of nodes for a in self._graph: if a not in nodes: nodes.append(a) for b in self._graph[a]: if b not in nodes: nodes.append(b) if fromsys not in nodes or tosys not in nodes: # fromsys or tosys are isolated or not registered, so there's # certainly no way to get from one to the other return None, inf edgeweights = {} # construct another graph that is a dict of dicts of priorities # (used as edge weights in Dijkstra's algorithm) for a in self._graph: edgeweights[a] = aew = {} agraph = self._graph[a] for b in agraph: aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1) # entries in q are [distance, count, nodeobj, pathlist] # count is needed because in py 3.x, tie-breaking fails on the nodes. 
        # this way, insertion order is preserved if the weights are the same
        q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
        q.insert(0, [0, -1, fromsys, []])

        # this dict will store the distance to node from ``fromsys`` and the path
        result = {}

        # definitely starts as a valid heap because of the insert line; from the
        # node to itself is always the shortest distance
        while len(q) > 0:
            d, orderi, n, path = heapq.heappop(q)

            if d == inf:
                # everything left is unreachable from fromsys, just copy them to
                # the results and jump out of the loop
                result[n] = (None, d)
                for d, orderi, n, path in q:
                    result[n] = (None, d)
                break
            else:
                result[n] = (path, d)
                path.append(n)
                if n not in edgeweights:
                    # this is a system that can be transformed to, but not from.
                    continue
                for n2 in edgeweights[n]:
                    if n2 not in result:  # not yet visited
                        # find where n2 is in the heap
                        for i in range(len(q)):
                            if q[i][2] == n2:
                                break
                        else:
                            raise ValueError('n2 not in heap - this should be impossible!')

                        newd = d + edgeweights[n][n2]
                        if newd < q[i][0]:
                            q[i][0] = newd
                            q[i][3] = list(path)
                            heapq.heapify(q)

        # cache for later use
        self._shortestpaths[fromsys] = result
        return result[tosys]

    def get_transform(self, fromsys, tosys):
        """
        Generates and returns the `CompositeTransform` for a transformation
        between two coordinate systems.

        Parameters
        ----------
        fromsys : class
            The coordinate frame class to start from.
        tosys : class
            The coordinate frame class to transform into.

        Returns
        -------
        trans : `CompositeTransform` or None
            If there is a path from ``fromsys`` to ``tosys``, this is a
            transform object for that path.  If no path could be found, this
            is `None`.

        Notes
        -----
        This function always returns a `CompositeTransform`, because
        `CompositeTransform` is slightly more adaptable in the way it can be
        called than other transform classes. Specifically, it takes care of
        intermediate steps of transformations in a way that is consistent with
        1-hop transformations.
        """
        if not inspect.isclass(fromsys):
            raise TypeError('fromsys is not a class')
        if not inspect.isclass(tosys):
            raise TypeError('tosys is not a class')

        path, distance = self.find_shortest_path(fromsys, tosys)

        if path is None:
            return None

        transforms = []
        currsys = fromsys
        for p in path[1:]:  # first element is fromsys so we skip it
            transforms.append(self._graph[currsys][p])
            currsys = p

        fttuple = (fromsys, tosys)
        if fttuple not in self._composite_cache:
            comptrans = CompositeTransform(transforms, fromsys, tosys,
                                           register_graph=False)
            self._composite_cache[fttuple] = comptrans
        return self._composite_cache[fttuple]

    def lookup_name(self, name):
        """
        Tries to locate the coordinate class with the provided alias.

        Parameters
        ----------
        name : str
            The alias to look up.

        Returns
        -------
        `BaseCoordinateFrame` subclass
            The coordinate class corresponding to the ``name`` or `None` if
            no such class exists.
        """
        return self._cached_names.get(name, None)

    def get_names(self):
        """
        Returns all available transform names.  They will all be valid
        arguments to `lookup_name`.

        Returns
        -------
        nms : list
            The aliases for coordinate systems.
        """
        return list(self._cached_names.keys())

    def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
                     savelayout='plain', saveformat=None, color_edges=True):
        """
        Converts this transform graph to the graphviz_ DOT format.

        Optionally saves it (requires `graphviz`_ be installed and on your path).

        .. _graphviz: http://www.graphviz.org/

        Parameters
        ----------
        priorities : bool
            If `True`, show the priority values for each transform.  Otherwise,
            they will not be included in the graph.
        addnodes : sequence of str
            Additional coordinate systems to add (this can include systems
            already in the transform graph, but they will only appear once).
        savefn : None or str
            The file name to save this graph to or `None` to not save
            to a file.
        savelayout : str
            The graphviz program to use to layout the graph (see graphviz_ for
            details) or 'plain' to just save the DOT graph content. Ignored if
            ``savefn`` is `None`.
        saveformat : str
            The graphviz output format. (e.g. the ``-Txxx`` option for
            the command line program - see graphviz docs for details).
            Ignored if ``savefn`` is `None`.
        color_edges : bool
            Color the edges between two nodes (frames) based on the type of
            transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
            blue, ``DynamicMatrixTransform``: green.

        Returns
        -------
        dotgraph : str
            A string with the DOT format graph.
        """
        nodes = []
        # find the node names
        for a in self._graph:
            if a not in nodes:
                nodes.append(a)
            for b in self._graph[a]:
                if b not in nodes:
                    nodes.append(b)
        for node in addnodes:
            if node not in nodes:
                nodes.append(node)

        nodenames = []
        invclsaliases = dict([(f, [k for k, v in self._cached_names.items() if v == f])
                              for f in self.frame_set])
        for n in nodes:
            if n in invclsaliases:
                aliases = '`\\n`'.join(invclsaliases[n])
                nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases))
            else:
                nodenames.append(n.__name__ + ' [shape=oval]')

        edgenames = []
        # Now the edges
        for a in self._graph:
            agraph = self._graph[a]
            for b in agraph:
                transform = agraph[b]
                pri = transform.priority if hasattr(transform, 'priority') else 1
                color = trans_to_color[transform.__class__] if color_edges else 'black'
                edgenames.append((a.__name__, b.__name__, pri, color))

        # generate simple dot format graph
        lines = ['digraph AstropyCoordinateTransformGraph {']
        lines.append('graph [rankdir=LR]')
        lines.append('; '.join(nodenames) + ';')
        for enm1, enm2, weights, color in edgenames:
            labelstr_fmt = '[ {0} {1} ]'

            if priorities:
                priority_part = f'label = "{weights}"'
            else:
                priority_part = ''

            color_part = f'color = "{color}"'

            labelstr = labelstr_fmt.format(priority_part, color_part)
            lines.append(f'{enm1} -> {enm2}{labelstr};')

        lines.append('')
        lines.append('overlap=false')
        lines.append('}')
        dotgraph = '\n'.join(lines)

        if savefn is not None:
            if savelayout == 'plain':
                with open(savefn, 'w') as f:
                    f.write(dotgraph)
            else:
                args = [savelayout]
                if saveformat is not None:
                    args.append('-T' + saveformat)
                # Pass the DOT source as text; setting ``encoding`` makes the
                # pipes work with `str` rather than `bytes`.
                proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        encoding='utf-8')
                stdout, stderr = proc.communicate(dotgraph)
                if proc.returncode != 0:
                    raise OSError('problem running graphviz: \n' + stderr)

                with open(savefn, 'w') as f:
                    f.write(stdout)

        return dotgraph

    def to_networkx_graph(self):
        """
        Converts this transform graph into a networkx graph.

        .. note::
            You must have the `networkx <https://networkx.github.io/>`_
            package installed for this to work.

        Returns
        -------
        nxgraph : ``networkx.Graph``
            This `TransformGraph` as a `networkx.Graph
            <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
""" import networkx as nx nxgraph = nx.Graph() # first make the nodes for a in self._graph: if a not in nxgraph: nxgraph.add_node(a) for b in self._graph[a]: if b not in nxgraph: nxgraph.add_node(b) # Now the edges for a in self._graph: agraph = self._graph[a] for b in agraph: transform = agraph[b] pri = transform.priority if hasattr(transform, 'priority') else 1 color = trans_to_color[transform.__class__] nxgraph.add_edge(a, b, weight=pri, color=color) return nxgraph def transform(self, transcls, fromsys, tosys, priority=1, **kwargs): """ A function decorator for defining transformations. .. note:: If decorating a static method of a class, ``@staticmethod`` should be added *above* this decorator. Parameters ---------- transcls : class The class of the transformation object to create. fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. priority : float or int The priority if this transform when finding the shortest coordinate transform path - large numbers are lower priorities. Additional keyword arguments are passed into the ``transcls`` constructor. Returns ------- deco : function A function that can be called on another function as a decorator (see example). Notes ----- This decorator assumes the first argument of the ``transcls`` initializer accepts a callable, and that the second and third are ``fromsys`` and ``tosys``. If this is not true, you should just initialize the class manually and use `add_transform` instead of using this decorator. Examples -------- :: graph = TransformGraph() class Frame1(BaseCoordinateFrame): ... class Frame2(BaseCoordinateFrame): ... @graph.transform(FunctionTransform, Frame1, Frame2) def f1_to_f2(f1_obj): ... do something with f1_obj ... return f2_obj """ def deco(func): # this doesn't do anything directly with the transform because # ``register_graph=self`` stores it in the transform graph # automatically transcls(func, fromsys, tosys, priority=priority, register_graph=self, **kwargs) return func return deco def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1): """ Add a single-step transform that encapsulates a multi-step transformation path, using the transforms that already exist in the graph. The created transform internally calls the existing transforms. If all of the transforms are affine, the merged transform is `~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no origin shifts) or `~astropy.coordinates.transformations.AffineTransform` (otherwise). If at least one of the transforms is not affine, the merged transform is `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`. This method is primarily useful for defining loopback transformations (i.e., where ``fromsys`` and the final ``tosys`` are the same). Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform to. *furthersys : class Additional coordinate frame classes to transform to in order. priority : number The priority of this transform when finding the shortest coordinate transform path - large numbers are lower priorities. Notes ----- Even though the created transform is a single step in the graph, it will still internally call the constituent transforms. Thus, there is no performance benefit for using this created transform. For Astropy's built-in frames, loopback transformations typically use `~astropy.coordinates.ICRS` to be safe. 
        Transforming through an inertial frame ensures that changes in
        observation time and observer location/velocity are properly accounted
        for.

        An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
        """
        frames = [fromsys, tosys, *furthersys]
        lastsys = frames[-1]
        full_path = self.get_transform(fromsys, lastsys)
        transforms = [self.get_transform(frame_a, frame_b)
                      for frame_a, frame_b in zip(frames[:-1], frames[1:])]
        if None in transforms:
            raise ValueError("This transformation path is not possible")
        if len(full_path.transforms) == 1:
            raise ValueError(f"A direct transform for "
                             f"{fromsys.__name__}->{lastsys.__name__} already exists")

        self.add_transform(fromsys, lastsys,
                           CompositeTransform(transforms, fromsys, lastsys,
                                              priority=priority)._as_single_transform())

    @contextmanager
    def impose_finite_difference_dt(self, dt):
        """
        Context manager to impose a finite-difference time step on all
        applicable transformations.

        For each transformation in this transformation graph that has the
        attribute ``finite_difference_dt``, that attribute is set to the
        provided value.  The only standard transformation with this attribute
        is
        `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.

        Parameters
        ----------
        dt : `~astropy.units.Quantity` ['time'] or callable
            If a quantity, this is the size of the differential used to do the
            finite difference.  If a callable, should accept
            ``(fromcoord, toframe)`` and return the ``dt`` value.
        """
        key = 'finite_difference_dt'
        saved_settings = []

        try:
            for to_frames in self._graph.values():
                for transform in to_frames.values():
                    if hasattr(transform, key):
                        old_setting = (transform, key, getattr(transform, key))
                        saved_settings.append(old_setting)
                        setattr(transform, key, dt)
            yield
        finally:
            for setting in saved_settings:
                setattr(*setting)


# <-------------------Define the builtin transform classes-------------------->


class CoordinateTransform(metaclass=ABCMeta):
    """
    An object that transforms a coordinate from one system to another.
    Subclasses must implement `__call__` with the provided signature.  They
    should also call this superclass's ``__init__`` in their ``__init__``.

    Parameters
    ----------
    fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
        The coordinate frame class to start from.
    tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.
""" def __init__(self, fromsys, tosys, priority=1, register_graph=None): if not inspect.isclass(fromsys): raise TypeError('fromsys must be a class') if not inspect.isclass(tosys): raise TypeError('tosys must be a class') self.fromsys = fromsys self.tosys = tosys self.priority = float(priority) if register_graph: # this will do the type-checking when it adds to the graph self.register(register_graph) else: if not inspect.isclass(fromsys) or not inspect.isclass(tosys): raise TypeError('fromsys and tosys must be classes') self.overlapping_frame_attr_names = overlap = [] if (hasattr(fromsys, 'get_frame_attr_names') and hasattr(tosys, 'get_frame_attr_names')): # the if statement is there so that non-frame things might be usable # if it makes sense for from_nm in fromsys.frame_attributes.keys(): if from_nm in tosys.frame_attributes.keys(): overlap.append(from_nm) def register(self, graph): """ Add this transformation to the requested Transformation graph, replacing anything already connecting these two coordinates. Parameters ---------- graph : `TransformGraph` object The graph to register this transformation with. """ graph.add_transform(self.fromsys, self.tosys, self) def unregister(self, graph): """ Remove this transformation from the requested transformation graph. Parameters ---------- graph : a TransformGraph object The graph to unregister this transformation from. Raises ------ ValueError If this is not currently in the transform graph. """ graph.remove_transform(self.fromsys, self.tosys, self) @abstractmethod def __call__(self, fromcoord, toframe): """ Does the actual coordinate transformation from the ``fromsys`` class to the ``tosys`` class. Parameters ---------- fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance An object of class matching ``fromsys`` that is to be transformed. toframe : object An object that has the attributes necessary to fully specify the frame. That is, it must have attributes with names that match the keys of the dictionary that ``tosys.get_frame_attr_names()`` returns. Typically this is of class ``tosys``, but it *might* be some other class as long as it has the appropriate attributes. Returns ------- tocoord : `BaseCoordinateFrame` subclass instance The new coordinate after the transform has been applied. """ class FunctionTransform(CoordinateTransform): """ A coordinate transformation defined by a function that accepts a coordinate object and returns the transformed coordinate object. Parameters ---------- func : callable The transformation function. Should have a call signature ``func(formcoord, toframe)``. Note that, unlike `CoordinateTransform.__call__`, ``toframe`` is assumed to be of type ``tosys`` for this function. fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. priority : float or int The priority if this transform when finding the shortest coordinate transform path - large numbers are lower priorities. register_graph : `TransformGraph` or None A graph to register this transformation with on creation, or `None` to leave it unregistered. Raises ------ TypeError If ``func`` is not callable. ValueError If ``func`` cannot accept two arguments. 
""" def __init__(self, func, fromsys, tosys, priority=1, register_graph=None): if not callable(func): raise TypeError('func must be callable') with suppress(TypeError): sig = signature(func) kinds = [x.kind for x in sig.parameters.values()] if (len(x for x in kinds if x == sig.POSITIONAL_ONLY) != 2 and sig.VAR_POSITIONAL not in kinds): raise ValueError('provided function does not accept two arguments') self.func = func super().__init__(fromsys, tosys, priority=priority, register_graph=register_graph) def __call__(self, fromcoord, toframe): res = self.func(fromcoord, toframe) if not isinstance(res, self.tosys): raise TypeError(f'the transformation function yielded {res} but ' f'should have been of type {self.tosys}') if fromcoord.data.differentials and not res.data.differentials: warn("Applied a FunctionTransform to a coordinate frame with " "differentials, but the FunctionTransform does not handle " "differentials, so they have been dropped.", AstropyWarning) return res class FunctionTransformWithFiniteDifference(FunctionTransform): r""" A coordinate transformation that works like a `FunctionTransform`, but computes velocity shifts based on the finite-difference relative to one of the frame attributes. Note that the transform function should *not* change the differential at all in this case, as any differentials will be overridden. When a differential is in the from coordinate, the finite difference calculation has two components. The first part is simple the existing differential, but re-orientation (using finite-difference techniques) to point in the direction the velocity vector has in the *new* frame. The second component is the "induced" velocity. That is, the velocity intrinsic to the frame itself, estimated by shifting the frame using the ``finite_difference_frameattr_name`` frame attribute a small amount (``finite_difference_dt``) in time and re-calculating the position. Parameters ---------- finite_difference_frameattr_name : str or None The name of the frame attribute on the frames to use for the finite difference. Both the to and the from frame will be checked for this attribute, but only one needs to have it. If None, no velocity component induced from the frame itself will be included - only the re-orientation of any existing differential. finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable If a quantity, this is the size of the differential used to do the finite difference. If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value. symmetric_finite_difference : bool If True, the finite difference is computed as :math:`\frac{x(t + \Delta t / 2) - x(t + \Delta t / 2)}{\Delta t}`, or if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter case has slightly better performance (and more stable finite difference behavior). All other parameters are identical to the initializer for `FunctionTransform`. 
""" def __init__(self, func, fromsys, tosys, priority=1, register_graph=None, finite_difference_frameattr_name='obstime', finite_difference_dt=1*u.second, symmetric_finite_difference=True): super().__init__(func, fromsys, tosys, priority, register_graph) self.finite_difference_frameattr_name = finite_difference_frameattr_name self.finite_difference_dt = finite_difference_dt self.symmetric_finite_difference = symmetric_finite_difference @property def finite_difference_frameattr_name(self): return self._finite_difference_frameattr_name @finite_difference_frameattr_name.setter def finite_difference_frameattr_name(self, value): if value is None: self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False else: diff_attr_in_fromsys = value in self.fromsys.frame_attributes diff_attr_in_tosys = value in self.tosys.frame_attributes if diff_attr_in_fromsys or diff_attr_in_tosys: self._diff_attr_in_fromsys = diff_attr_in_fromsys self._diff_attr_in_tosys = diff_attr_in_tosys else: raise ValueError('Frame attribute name {} is not a frame ' 'attribute of {} or {}'.format(value, self.fromsys, self.tosys)) self._finite_difference_frameattr_name = value def __call__(self, fromcoord, toframe): from .representation import (CartesianRepresentation, CartesianDifferential) supcall = self.func if fromcoord.data.differentials: # this is the finite difference case if callable(self.finite_difference_dt): dt = self.finite_difference_dt(fromcoord, toframe) else: dt = self.finite_difference_dt halfdt = dt/2 from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials()) reprwithoutdiff = supcall(from_diffless, toframe) # first we use the existing differential to compute an offset due to # the already-existing velocity, but in the new frame fromcoord_cart = fromcoord.cartesian if self.symmetric_finite_difference: fwdxyz = (fromcoord_cart.xyz + fromcoord_cart.differentials['s'].d_xyz*halfdt) fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe) backxyz = (fromcoord_cart.xyz - fromcoord_cart.differentials['s'].d_xyz*halfdt) back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe) else: fwdxyz = (fromcoord_cart.xyz + fromcoord_cart.differentials['s'].d_xyz*dt) fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe) back = reprwithoutdiff diffxyz = (fwd.cartesian - back.cartesian).xyz / dt # now we compute the "induced" velocities due to any movement in # the frame itself over time attrname = self.finite_difference_frameattr_name if attrname is not None: if self.symmetric_finite_difference: if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) + halfdt} from_diffless_fwd = from_diffless.replicate(**kws) else: from_diffless_fwd = from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) + halfdt} fwd_frame = toframe.replicate_without_data(**kws) else: fwd_frame = toframe fwd = supcall(from_diffless_fwd, fwd_frame) if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) - halfdt} from_diffless_back = from_diffless.replicate(**kws) else: from_diffless_back = from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) - halfdt} back_frame = toframe.replicate_without_data(**kws) else: back_frame = toframe back = supcall(from_diffless_back, back_frame) else: if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) + dt} from_diffless_fwd = from_diffless.replicate(**kws) else: from_diffless_fwd = 
from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) + dt} fwd_frame = toframe.replicate_without_data(**kws) else: fwd_frame = toframe fwd = supcall(from_diffless_fwd, fwd_frame) back = reprwithoutdiff diffxyz += (fwd.cartesian - back.cartesian).xyz / dt newdiff = CartesianDifferential(diffxyz) reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff) return reprwithoutdiff.realize_frame(reprwithdiff) else: return supcall(fromcoord, toframe) class BaseAffineTransform(CoordinateTransform): """Base class for common functionality between the ``AffineTransform``-type subclasses. This base class is needed because ``AffineTransform`` and the matrix transform classes share the ``__call__()`` method, but differ in how they generate the affine parameters. ``StaticMatrixTransform`` passes in a matrix stored as a class attribute, and both of the matrix transforms pass in ``None`` for the offset. Hence, user subclasses would likely want to subclass this (rather than ``AffineTransform``) if they want to provide alternative transformations using this machinery. """ def _apply_transform(self, fromcoord, matrix, offset): from .representation import (UnitSphericalRepresentation, CartesianDifferential, SphericalDifferential, SphericalCosLatDifferential, RadialDifferential) data = fromcoord.data has_velocity = 's' in data.differentials # Bail out if no transform is actually requested if matrix is None and offset is None: return data # list of unit differentials _unit_diffs = (SphericalDifferential._unit_differential, SphericalCosLatDifferential._unit_differential) unit_vel_diff = (has_velocity and isinstance(data.differentials['s'], _unit_diffs)) rad_vel_diff = (has_velocity and isinstance(data.differentials['s'], RadialDifferential)) # Some initial checking to short-circuit doing any re-representation if # we're going to fail anyways: if isinstance(data, UnitSphericalRepresentation) and offset is not None: raise TypeError("Position information stored on coordinate frame " "is insufficient to do a full-space position " "transformation (representation class: {})" .format(data.__class__)) elif (has_velocity and (unit_vel_diff or rad_vel_diff) and offset is not None and 's' in offset.differentials): # Coordinate has a velocity, but it is not a full-space velocity # that we need to do a velocity offset raise TypeError("Velocity information stored on coordinate frame " "is insufficient to do a full-space velocity " "transformation (differential class: {})" .format(data.differentials['s'].__class__)) elif len(data.differentials) > 1: # We should never get here because the frame initializer shouldn't # allow more differentials, but this just adds protection for # subclasses that somehow skip the checks raise ValueError("Representation passed to AffineTransform contains" " multiple associated differentials. Only a single" " differential with velocity units is presently" " supported (differentials: {})." 
.format(str(data.differentials))) # If the representation is a UnitSphericalRepresentation, and this is # just a MatrixTransform, we have to try to turn the differential into a # Unit version of the differential (if no radial velocity) or a # sphericaldifferential with zero proper motion (if only a radial # velocity) so that the matrix operation works if (has_velocity and isinstance(data, UnitSphericalRepresentation) and not unit_vel_diff and not rad_vel_diff): # retrieve just velocity differential unit_diff = data.differentials['s'].represent_as( data.differentials['s']._unit_differential, data) data = data.with_differentials({'s': unit_diff}) # updates key # If it's a RadialDifferential, we flat-out ignore the differentials # This is because, by this point (past the validation above), we can # only possibly be doing a rotation-only transformation, and that # won't change the radial differential. We later add it back in elif rad_vel_diff: data = data.without_differentials() # Convert the representation and differentials to cartesian without # having them attached to a frame rep = data.to_cartesian() diffs = dict([(k, diff.represent_as(CartesianDifferential, data)) for k, diff in data.differentials.items()]) rep = rep.with_differentials(diffs) # Only do transform if matrix is specified. This is for speed in # transformations that only specify an offset (e.g., LSR) if matrix is not None: # Note: this applies to both representation and differentials rep = rep.transform(matrix) # TODO: if we decide to allow arithmetic between representations that # contain differentials, this can be tidied up if offset is not None: newrep = (rep.without_differentials() + offset.without_differentials()) else: newrep = rep.without_differentials() # We need a velocity (time derivative) and, for now, are strict: the # representation can only contain a velocity differential and no others. if has_velocity and not rad_vel_diff: veldiff = rep.differentials['s'] # already in Cartesian form if offset is not None and 's' in offset.differentials: veldiff = veldiff + offset.differentials['s'] newrep = newrep.with_differentials({'s': veldiff}) if isinstance(fromcoord.data, UnitSphericalRepresentation): # Special-case this because otherwise the return object will think # it has a valid distance with the default return (a # CartesianRepresentation instance) if has_velocity and not unit_vel_diff and not rad_vel_diff: # We have to first represent as the Unit types we converted to, # then put the d_distance information back in to the # differentials and re-represent as their original forms newdiff = newrep.differentials['s'] _unit_cls = fromcoord.data.differentials['s']._unit_differential newdiff = newdiff.represent_as(_unit_cls, newrep) kwargs = dict([(comp, getattr(newdiff, comp)) for comp in newdiff.components]) kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance diffs = {'s': fromcoord.data.differentials['s'].__class__( copy=False, **kwargs)} elif has_velocity and unit_vel_diff: newdiff = newrep.differentials['s'].represent_as( fromcoord.data.differentials['s'].__class__, newrep) diffs = {'s': newdiff} else: diffs = newrep.differentials newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs newrep = newrep.with_differentials(diffs) elif has_velocity and unit_vel_diff: # Here, we're in the case where the representation is not # UnitSpherical, but the differential *is* one of the UnitSpherical # types. 
We have to convert back to that differential class or the
            # resulting frame will think it has a valid radial_velocity. This
            # can probably be cleaned up: we currently have to go through the
            # dimensional version of the differential before representing as
            # the unit differential so that the units work out (the distance
            # length unit shouldn't appear in the resulting proper motions)
            diff_cls = fromcoord.data.differentials['s'].__class__
            newrep = newrep.represent_as(fromcoord.data.__class__,
                                         diff_cls._dimensional_differential)
            newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)

        # We pulled the radial differential off of the representation
        # earlier, so now we need to put it back. But, in order to do that, we
        # have to turn the representation into a repr that is compatible with
        # having a RadialDifferential
        if has_velocity and rad_vel_diff:
            newrep = newrep.represent_as(fromcoord.data.__class__)
            newrep = newrep.with_differentials(
                {'s': fromcoord.data.differentials['s']})

        return newrep

    def __call__(self, fromcoord, toframe):
        params = self._affine_params(fromcoord, toframe)
        newrep = self._apply_transform(fromcoord, *params)
        return toframe.realize_frame(newrep)

    @abstractmethod
    def _affine_params(self, fromcoord, toframe):
        pass


class AffineTransform(BaseAffineTransform):
    """
    A coordinate transformation specified as a function that yields a 3 x 3
    cartesian transformation matrix and a tuple of displacement vectors.

    See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
    an example.

    Parameters
    ----------
    transform_func : callable
        A callable that has the signature ``transform_func(fromcoord, toframe)``
        and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
        Cartesian representation, and a ``CartesianRepresentation`` with
        (optionally) an attached velocity ``CartesianDifferential`` to
        represent a translation and offset in velocity to apply after the
        matrix operation.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    TypeError
        If ``transform_func`` is not callable

    """

    def __init__(self, transform_func, fromsys, tosys, priority=1,
                 register_graph=None):
        if not callable(transform_func):
            raise TypeError('transform_func is not callable')
        self.transform_func = transform_func

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.transform_func(fromcoord, toframe)


class StaticMatrixTransform(BaseAffineTransform):
    """
    A coordinate transformation defined as a 3 x 3 cartesian
    transformation matrix.

    This is distinct from DynamicMatrixTransform in that this kind of matrix is
    independent of frame attributes.  That is, it depends *only* on the class
    of the frame.

    Parameters
    ----------
    matrix : array-like or callable
        A 3 x 3 matrix for transforming 3-vectors. In most cases will
        be unitary (although this is not strictly required). If a callable,
        will be called *with no arguments* to get the matrix.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    ValueError
        If the matrix is not 3 x 3

    """

    def __init__(self, matrix, fromsys, tosys, priority=1,
                 register_graph=None):
        if callable(matrix):
            matrix = matrix()
        self.matrix = np.array(matrix)

        if self.matrix.shape != (3, 3):
            raise ValueError('Provided matrix is not 3 x 3')

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.matrix, None


class DynamicMatrixTransform(BaseAffineTransform):
    """
    A coordinate transformation specified as a function that yields a
    3 x 3 cartesian transformation matrix.

    This is similar to, but distinct from StaticMatrixTransform, in that the
    matrix for this class might depend on frame attributes.

    Parameters
    ----------
    matrix_func : callable
        A callable that has the signature ``matrix_func(fromcoord, toframe)``
        and returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
        representation to the new coordinate system.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    TypeError
        If ``matrix_func`` is not callable

    """

    def __init__(self, matrix_func, fromsys, tosys, priority=1,
                 register_graph=None):
        if not callable(matrix_func):
            raise TypeError('matrix_func is not callable')
        self.matrix_func = matrix_func

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.matrix_func(fromcoord, toframe)


class CompositeTransform(CoordinateTransform):
    """
    A transformation constructed by combining together a series of single-step
    transformations.

    Note that the intermediate frame objects are constructed using any frame
    attributes in ``toframe`` or ``fromframe`` that overlap with the
    intermediate frame (``toframe`` favored over ``fromframe`` if there's a
    conflict).  Any frame attributes that are not present use the defaults.

    Parameters
    ----------
    transforms : sequence of `CoordinateTransform` object
        The sequence of transformations to apply.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.
    collapse_static_mats : bool
        If `True`, consecutive `StaticMatrixTransform` will be collapsed into
        a single transformation to speed up the calculation.
""" def __init__(self, transforms, fromsys, tosys, priority=1, register_graph=None, collapse_static_mats=True): super().__init__(fromsys, tosys, priority=priority, register_graph=register_graph) if collapse_static_mats: transforms = self._combine_statics(transforms) self.transforms = tuple(transforms) def _combine_statics(self, transforms): """ Combines together sequences of `StaticMatrixTransform`s into a single transform and returns it. """ newtrans = [] for currtrans in transforms: lasttrans = newtrans[-1] if len(newtrans) > 0 else None if (isinstance(lasttrans, StaticMatrixTransform) and isinstance(currtrans, StaticMatrixTransform)): combinedmat = matrix_product(currtrans.matrix, lasttrans.matrix) newtrans[-1] = StaticMatrixTransform(combinedmat, lasttrans.fromsys, currtrans.tosys) else: newtrans.append(currtrans) return newtrans def __call__(self, fromcoord, toframe): curr_coord = fromcoord for t in self.transforms: # build an intermediate frame with attributes taken from either # `toframe`, or if not there, `fromcoord`, or if not there, use # the defaults # TODO: caching this information when creating the transform may # speed things up a lot frattrs = {} for inter_frame_attr_nm in t.tosys.get_frame_attr_names(): if hasattr(toframe, inter_frame_attr_nm): attr = getattr(toframe, inter_frame_attr_nm) frattrs[inter_frame_attr_nm] = attr elif hasattr(fromcoord, inter_frame_attr_nm): attr = getattr(fromcoord, inter_frame_attr_nm) frattrs[inter_frame_attr_nm] = attr curr_toframe = t.tosys(**frattrs) curr_coord = t(curr_coord, curr_toframe) # this is safe even in the case where self.transforms is empty, because # coordinate objects are immutable, so copying is not needed return curr_coord def _as_single_transform(self): """ Return an encapsulated version of the composite transform so that it appears to be a single transform. The returned transform internally calls the constituent transforms. If all of the transforms are affine, the merged transform is `~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no origin shifts) or `~astropy.coordinates.transformations.AffineTransform` (otherwise). If at least one of the transforms is not affine, the merged transform is `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`. 
""" # Create a list of the transforms including flattening any constituent CompositeTransform transforms = [t if not isinstance(t, CompositeTransform) else t._as_single_transform() for t in self.transforms] if all([isinstance(t, BaseAffineTransform) for t in transforms]): # Check if there may be an origin shift fixed_origin = all([isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform)) for t in transforms]) # Dynamically define the transformation function def single_transform(from_coo, to_frame): if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame return None if fixed_origin else (None, None) # Create a merged attribute dictionary for any intermediate frames # For any attributes shared by the "from"/"to" frames, the "to" frame takes # precedence because this is the same choice implemented in __call__() merged_attr = {name: getattr(from_coo, name) for name in from_coo.frame_attributes} merged_attr.update({name: getattr(to_frame, name) for name in to_frame.frame_attributes}) affine_params = (None, None) # Step through each transform step (frame A -> frame B) for i, t in enumerate(transforms): # Extract the relevant attributes for frame A if i == 0: # If frame A is actually the initial frame, preserve its attributes a_attr = {name: getattr(from_coo, name) for name in from_coo.frame_attributes} else: a_attr = {k: v for k, v in merged_attr.items() if k in t.fromsys.frame_attributes} # Extract the relevant attributes for frame B b_attr = {k: v for k, v in merged_attr.items() if k in t.tosys.frame_attributes} # Obtain the affine parameters for the transform # Note that we insert some dummy data into frame A because the transformation # machinery requires there to be data present. Removing that limitation # is a possible TODO, but some care would need to be taken because some affine # transforms have branching code depending on the presence of differentials. next_affine_params = t._affine_params(t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)) # Combine the affine parameters with the running set affine_params = _combine_affine_params(affine_params, next_affine_params) # If there is no origin shift, return only the matrix return affine_params[0] if fixed_origin else affine_params # The return type depends on whether there is any origin shift transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform else: # Dynamically define the transformation function def single_transform(from_coo, to_frame): if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame return to_frame.realize_frame(from_coo.data) return self(from_coo, to_frame) transform_type = FunctionTransformWithFiniteDifference return transform_type(single_transform, self.fromsys, self.tosys, priority=self.priority) def _combine_affine_params(params, next_params): """ Combine two sets of affine parameters. The parameters for an affine transformation are a 3 x 3 Cartesian transformation matrix and a displacement vector, which can include an attached velocity. Either type of parameter can be ``None``. 
""" M, vec = params next_M, next_vec = next_params # Multiply the transformation matrices if they both exist if M is not None and next_M is not None: new_M = next_M @ M else: new_M = M if M is not None else next_M if vec is not None: # Transform the first displacement vector by the second transformation matrix if next_M is not None: vec = vec.transform(next_M) # Calculate the new displacement vector if next_vec is not None: if 's' in vec.differentials and 's' in next_vec.differentials: # Adding vectors with velocities takes more steps # TODO: Add support in representation.py new_vec_velocity = vec.differentials['s'] + next_vec.differentials['s'] new_vec = vec.without_differentials() + next_vec.without_differentials() new_vec = new_vec.with_differentials({'s': new_vec_velocity}) else: new_vec = vec + next_vec else: new_vec = vec else: new_vec = next_vec return new_M, new_vec # map class names to colorblind-safe colors trans_to_color = {} trans_to_color[AffineTransform] = '#555555' # gray trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
8931907d160e3ebb83ebc7ecb5426f5086eba9cc6864645387aace0e1b9ee1f2
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the classes and utility functions for distance and cartesian coordinates. """ import warnings import numpy as np from astropy import units as u from astropy.utils.exceptions import AstropyWarning from .angles import Angle __all__ = ['Distance'] __doctest_requires__ = {'*': ['scipy']} class Distance(u.SpecificTypeQuantity): """ A one-dimensional distance. This can be initialized by providing one of the following: * Distance ``value`` (array or float) and a ``unit`` * |Quantity| object with dimensionality of length * Redshift and (optionally) a `~astropy.cosmology.Cosmology` * Distance modulus * Parallax Parameters ---------- value : scalar or `~astropy.units.Quantity` ['length'] The value of this distance. unit : `~astropy.units.UnitBase` ['length'] The unit for this distance. z : float A redshift for this distance. It will be converted to a distance by computing the luminosity distance for this redshift given the cosmology specified by ``cosmology``. Must be given as a keyword argument. cosmology : `~astropy.cosmology.Cosmology` or None A cosmology that will be used to compute the distance from ``z``. If `None`, the current cosmology will be used (see `astropy.cosmology` for details). distmod : float or `~astropy.units.Quantity` The distance modulus for this distance. Note that if ``unit`` is not provided, a guess will be made at the unit between AU, pc, kpc, and Mpc. parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle` The parallax in angular units. dtype : `~numpy.dtype`, optional See `~astropy.units.Quantity`. copy : bool, optional See `~astropy.units.Quantity`. order : {'C', 'F', 'A'}, optional See `~astropy.units.Quantity`. subok : bool, optional See `~astropy.units.Quantity`. ndmin : int, optional See `~astropy.units.Quantity`. allow_negative : bool, optional Whether to allow negative distances (which are possible in some cosmologies). Default: `False`. Raises ------ `~astropy.units.UnitsError` If the ``unit`` is not a length unit. ValueError If value specified is less than 0 and ``allow_negative=False``. If ``cosmology`` is provided when ``z`` is *not* given. If either none or more than one of ``value``, ``z``, ``distmod``, or ``parallax`` were given. Examples -------- >>> from astropy import units as u >>> from astropy.cosmology import WMAP5 >>> Distance(10, u.Mpc) <Distance 10. Mpc> >>> Distance(40*u.pc, unit=u.kpc) <Distance 0.04 kpc> >>> Distance(z=0.23) # doctest: +FLOAT_CMP <Distance 1184.01657566 Mpc> >>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP <Distance 1147.78831918 Mpc> >>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP <Distance 783.42964277 kpc> >>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP <Distance 46.86035614 pc> """ _equivalent_unit = u.m _include_easy_conversion_members = True def __new__(cls, value=None, unit=None, z=None, cosmology=None, distmod=None, parallax=None, dtype=None, copy=True, order=None, subok=False, ndmin=0, allow_negative=False): n_not_none = sum(x is not None for x in [value, z, distmod, parallax]) if n_not_none == 0: raise ValueError('none of `value`, `z`, `distmod`, or `parallax` ' 'were given to Distance constructor') elif n_not_none > 1: raise ValueError('more than one of `value`, `z`, `distmod`, or ' '`parallax` were given to Distance constructor') if value is None: # If something else but `value` was provided then a new array will # be created anyways and there is no need to copy that. 
copy = False if z is not None: if cosmology is None: from astropy.cosmology import default_cosmology cosmology = default_cosmology.get() value = cosmology.luminosity_distance(z) elif cosmology is not None: raise ValueError('a `cosmology` was given but `z` was not ' 'provided in Distance constructor') elif distmod is not None: value = cls._distmod_to_pc(distmod) if unit is None: # if the unit is not specified, guess based on the mean of # the log of the distance meanlogval = np.log10(value.value).mean() if meanlogval > 6: unit = u.Mpc elif meanlogval > 3: unit = u.kpc elif meanlogval < -3: # ~200 AU unit = u.AU else: unit = u.pc elif parallax is not None: if unit is None: unit = u.pc value = parallax.to_value(unit, equivalencies=u.parallax()) if np.any(parallax < 0): if allow_negative: warnings.warn( "negative parallaxes are converted to NaN " "distances even when `allow_negative=True`, " "because negative parallaxes cannot be transformed " "into distances. See the discussion in this paper: " "https://arxiv.org/abs/1507.02105", AstropyWarning) else: raise ValueError( "some parallaxes are negative, which are not " "interpretable as distances. See the discussion in " "this paper: https://arxiv.org/abs/1507.02105 . You " "can convert negative parallaxes to NaN distances by " "providing the `allow_negative=True` argument.") # now we have arguments like for a Quantity, so let it do the work distance = super().__new__( cls, value, unit, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin) # This invalid catch block can be removed when the minimum numpy # version is >= 1.19 (NUMPY_LT_1_19) with np.errstate(invalid='ignore'): any_negative = np.any(distance.value < 0) if not allow_negative and any_negative: raise ValueError("distance must be >= 0. Use the argument " "`allow_negative=True` to allow negative values.") return distance @property def z(self): """Short for ``self.compute_z()``""" return self.compute_z() def compute_z(self, cosmology=None, **atzkw): """ The redshift for this distance assuming its physical distance is a luminosity distance. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` or None The cosmology to assume for this calculation, or `None` to use the current cosmology (see `astropy.cosmology` for details). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- z : `~astropy.units.Quantity` The redshift of this distance given the provided ``cosmology``. Warnings -------- This method can be slow for large arrays. The redshift is determined using :func:`astropy.cosmology.z_at_value`, which handles vector inputs (e.g. an array of distances) by element-wise calling of :func:`scipy.optimize.minimize_scalar`. For faster results consider using an interpolation table; :func:`astropy.cosmology.z_at_value` provides details. See Also -------- :func:`astropy.cosmology.z_at_value` : Find the redshift corresponding to a :meth:`astropy.cosmology.FLRW.luminosity_distance`. """ from astropy.cosmology import z_at_value if cosmology is None: from astropy.cosmology import default_cosmology cosmology = default_cosmology.get() atzkw.setdefault("ztol", 1.e-10) return z_at_value(cosmology.luminosity_distance, self, **atzkw) @property def distmod(self): """The distance modulus as a `~astropy.units.Quantity`""" val = 5. * np.log10(self.to_value(u.pc)) - 5. 
return u.Quantity(val, u.mag, copy=False) @classmethod def _distmod_to_pc(cls, dm): dm = u.Quantity(dm, u.mag) return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False) @property def parallax(self): """The parallax angle as an `~astropy.coordinates.Angle` object""" return Angle(self.to(u.milliarcsecond, u.parallax()))
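
# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the module above.  It
# exercises the alternative constructor arguments documented on ``Distance``
# and the inverse properties defined above; the numbers are arbitrary.

def _sketch_distance_usage():
    d1 = Distance(10, u.Mpc)            # plain value + unit
    d2 = Distance(parallax=10 * u.mas)  # 10 mas corresponds to 100 pc
    d3 = Distance(distmod=5 * u.mag)    # distance modulus 5 is also 100 pc
    # the derived properties invert the corresponding constructor arguments
    return d1.to(u.lyr), d2.parallax, d3.distmod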
3437ac942617574b4c51dfb2c3f0b280b71f6cf468cee880d9c497553edf8824
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains convenience functions for coordinate-related functionality. This is generally just wrapping around the object-oriented coordinates framework, but it is useful for some users who are used to more functional interfaces. """ import warnings from collections.abc import Sequence import numpy as np import erfa from astropy import units as u from astropy.constants import c from astropy.io import ascii from astropy.utils import isiterable, data from .sky_coordinate import SkyCoord from .builtin_frames import GCRS, PrecessedGeocentric from .representation import SphericalRepresentation, CartesianRepresentation from .builtin_frames.utils import get_jd12 __all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun', 'get_constellation', 'concatenate_representations', 'concatenate'] def cartesian_to_spherical(x, y, z): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. Note that the resulting angles are latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This function simply wraps functionality provided by the `~astropy.coordinates.CartesianRepresentation` and `~astropy.coordinates.SphericalRepresentation` classes. In general, for both performance and readability, we suggest using these classes directly. But for situations where a quick one-off conversion makes sense, this function is provided. Parameters ---------- x : scalar, array-like, or `~astropy.units.Quantity` The first Cartesian coordinate. y : scalar, array-like, or `~astropy.units.Quantity` The second Cartesian coordinate. z : scalar, array-like, or `~astropy.units.Quantity` The third Cartesian coordinate. Returns ------- r : `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : `~astropy.units.Quantity` ['angle'] The latitude in radians lon : `~astropy.units.Quantity` ['angle'] The longitude in radians """ if not hasattr(x, 'unit'): x = x * u.dimensionless_unscaled if not hasattr(y, 'unit'): y = y * u.dimensionless_unscaled if not hasattr(z, 'unit'): z = z * u.dimensionless_unscaled cart = CartesianRepresentation(x, y, z) sph = cart.represent_as(SphericalRepresentation) return sph.distance, sph.lat, sph.lon def spherical_to_cartesian(r, lat, lon): """ Converts spherical polar coordinates to rectangular cartesian coordinates. Note that the input angles should be in latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This is a low-level function used internally in `astropy.coordinates`. It is provided for users if they really want to use it, but it is recommended that you use the `astropy.coordinates` coordinate systems. Parameters ---------- r : scalar, array-like, or `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The latitude (in radians if array or scalar) lon : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The longitude (in radians if array or scalar) Returns ------- x : float or array The first cartesian coordinate. y : float or array The second cartesian coordinate. z : float or array The third cartesian coordinate. 
""" if not hasattr(r, 'unit'): r = r * u.dimensionless_unscaled if not hasattr(lat, 'unit'): lat = lat * u.radian if not hasattr(lon, 'unit'): lon = lon * u.radian sph = SphericalRepresentation(distance=r, lat=lat, lon=lon) cart = sph.represent_as(CartesianRepresentation) return cart.x, cart.y, cart.z def get_sun(time): """ Determines the location of the sun at a given time (or times, if the input is an array `~astropy.time.Time` object), in geocentric coordinates. Parameters ---------- time : `~astropy.time.Time` The time(s) at which to compute the location of the sun. Returns ------- newsc : `~astropy.coordinates.SkyCoord` The location of the sun as a `~astropy.coordinates.SkyCoord` in the `~astropy.coordinates.GCRS` frame. Notes ----- The algorithm for determining the sun/earth relative position is based on the simplified version of VSOP2000 that is part of ERFA. Compared to JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps 250 km over the 1000-3000. """ earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb')) # We have to manually do aberration because we're outputting directly into # GCRS earth_p = earth_pv_helio['p'] earth_v = earth_pv_bary['v'] # convert barycentric velocity to units of c, but keep as array for passing in to erfa earth_v /= c.to_value(u.au/u.d) dsun = np.sqrt(np.sum(earth_p**2, axis=-1)) invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5 properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)), -earth_v, dsun, invlorentz) cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU, y=-dsun*properdir[..., 1] * u.AU, z=-dsun*properdir[..., 2] * u.AU) return SkyCoord(cartrep, frame=GCRS(obstime=time)) # global dictionary that caches repeatedly-needed info for get_constellation _constellation_data = {} def get_constellation(coord, short_name=False, constellation_list='iau'): """ Determines the constellation(s) a given coordinate object contains. Parameters ---------- coord : coordinate-like The object to determine the constellation of. short_name : bool If True, the returned names are the IAU-sanctioned abbreviated names. Otherwise, full names for the constellations are used. constellation_list : str The set of constellations to use. Currently only ``'iau'`` is supported, meaning the 88 "modern" constellations endorsed by the IAU. Returns ------- constellation : str or string array If ``coords`` contains a scalar coordinate, returns the name of the constellation. If it is an array coordinate object, it returns an array of names. Notes ----- To determine which constellation a point on the sky is in, this precesses to B1875, and then uses the Delporte boundaries of the 88 modern constellations, as tabulated by `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_. 
""" if constellation_list != 'iau': raise ValueError("only 'iau' us currently supported for constellation_list") # read the data files and cache them if they haven't been already if not _constellation_data: cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat') ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name']) cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8') cnames_short_to_long = dict([(l[:3], l[4:]) for l in cnames.split('\n') if not l.startswith('#')]) cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']]) _constellation_data['ctable'] = ctable _constellation_data['cnames_long'] = cnames_long else: ctable = _constellation_data['ctable'] cnames_long = _constellation_data['cnames_long'] isscalar = coord.isscalar # if it is geocentric, we reproduce the frame but with the 1875 equinox, # which is where the constellations are defined # this yields a "dubious year" warning because ERFA considers the year 1875 # "dubious", probably because UTC isn't well-defined then and precession # models aren't precisely calibrated back to then. But it's plenty # sufficient for constellations with warnings.catch_warnings(): warnings.simplefilter('ignore', erfa.ErfaWarning) constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875')) if isscalar: rah = constel_coord.ra.ravel().hour decd = constel_coord.dec.ravel().deg else: rah = constel_coord.ra.hour decd = constel_coord.dec.deg constellidx = -np.ones(len(rah), dtype=int) notided = constellidx == -1 # should be all for i, row in enumerate(ctable): msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl']) constellidx[notided & msk] = i notided = constellidx == -1 if np.sum(notided) == 0: break else: raise ValueError(f'Could not find constellation for coordinates {constel_coord[notided]}') if short_name: names = ctable['name'][constellidx] else: names = cnames_long[constellidx] if isscalar: return names[0] else: return names def _concatenate_components(reps_difs, names): """ Helper function for the concatenate function below. Gets and concatenates all of the individual components for an iterable of representations or differentials. """ values = [] for name in names: unit0 = getattr(reps_difs[0], name).unit # Go via to_value because np.concatenate doesn't work with Quantity data_vals = [getattr(x, name).to_value(unit0) for x in reps_difs] concat_vals = np.concatenate(np.atleast_1d(*data_vals)) concat_vals = concat_vals << unit0 values.append(concat_vals) return values def concatenate_representations(reps): """ Combine multiple representation objects into a single instance by concatenating the data in each component. Currently, all of the input representations have to be the same type. This properly handles differential or velocity data, but all input objects must have the same differential object type as well. Parameters ---------- reps : sequence of `~astropy.coordinates.BaseRepresentation` The objects to concatenate Returns ------- rep : `~astropy.coordinates.BaseRepresentation` subclass instance A single representation object with its data set to the concatenation of all the elements of the input sequence of representations. 
""" if not isinstance(reps, (Sequence, np.ndarray)): raise TypeError('Input must be a list or iterable of representation ' 'objects.') # First, validate that the representations are the same, and # concatenate all of the positional data: rep_type = type(reps[0]) if any(type(r) != rep_type for r in reps): raise TypeError('Input representations must all have the same type.') # Construct the new representation with the concatenated data from the # representations passed in values = _concatenate_components(reps, rep_type.attr_classes.keys()) new_rep = rep_type(*values) has_diff = any('s' in rep.differentials for rep in reps) if has_diff and any('s' not in rep.differentials for rep in reps): raise ValueError('Input representations must either all contain ' 'differentials, or not contain differentials.') if has_diff: dif_type = type(reps[0].differentials['s']) if any('s' not in r.differentials or type(r.differentials['s']) != dif_type for r in reps): raise TypeError('All input representations must have the same ' 'differential type.') values = _concatenate_components([r.differentials['s'] for r in reps], dif_type.attr_classes.keys()) new_dif = dif_type(*values) new_rep = new_rep.with_differentials({'s': new_dif}) return new_rep def concatenate(coords): """ Combine multiple coordinate objects into a single `~astropy.coordinates.SkyCoord`. "Coordinate objects" here mean frame objects with data, `~astropy.coordinates.SkyCoord`, or representation objects. Currently, they must all be in the same frame, but in a future version this may be relaxed to allow inhomogeneous sequences of objects. Parameters ---------- coords : sequence of coordinate-like The objects to concatenate Returns ------- cskycoord : SkyCoord A single sky coordinate with its data set to the concatenation of all the elements in ``coords`` """ if getattr(coords, 'isscalar', False) or not isiterable(coords): raise TypeError('The argument to concatenate must be iterable') scs = [SkyCoord(coord, copy=False) for coord in coords] # Check that all frames are equivalent for sc in scs[1:]: if not sc.is_equivalent_frame(scs[0]): raise ValueError("All inputs must have equivalent frames: " "{} != {}".format(sc, scs[0])) # TODO: this can be changed to SkyCoord.from_representation() for a speed # boost when we switch to using classmethods return SkyCoord(concatenate_representations([c.data for c in coords]), frame=scs[0].frame)
43599a377a0eabec14c984534e2b5096fbb6883bc0ede3e7adda6d57b8920487
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Framework and base classes for coordinate frames/"low-level"
coordinate classes.
"""

# Standard library
import copy
import inspect
from collections import namedtuple, defaultdict
import warnings

# Dependencies
import numpy as np

# Project
from astropy.utils.compat.misc import override__dir__
from astropy.utils.decorators import lazyproperty, format_doc
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast

from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute

__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
           'GenericFrame', 'RepresentationMapping']


# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()


def _get_repr_cls(value):
    """
    Return a valid representation class from ``value`` or raise exception.
    """
    if value in r.REPRESENTATION_CLASSES:
        value = r.REPRESENTATION_CLASSES[value]
    elif (not isinstance(value, type)
          or not issubclass(value, r.BaseRepresentation)):
        raise ValueError(
            'Representation is {!r} but must be a BaseRepresentation class '
            'or one of the string aliases {}'.format(
                value, list(r.REPRESENTATION_CLASSES)))
    return value


def _get_diff_cls(value):
    """
    Return a valid differential class from ``value`` or raise exception.

    As originally created, this is only used in the SkyCoord initializer, so
    if that is refactored, this function may no longer be necessary.
    """
    if value in r.DIFFERENTIAL_CLASSES:
        value = r.DIFFERENTIAL_CLASSES[value]
    elif (not isinstance(value, type)
          or not issubclass(value, r.BaseDifferential)):
        raise ValueError(
            'Differential is {!r} but must be a BaseDifferential class '
            'or one of the string aliases {}'.format(
                value, list(r.DIFFERENTIAL_CLASSES)))
    return value


def _get_repr_classes(base, **differentials):
    """Get valid representation and differential classes.

    Parameters
    ----------
    base : str or `~astropy.coordinates.BaseRepresentation` subclass
        class for the representation of the base coordinates.  If a string,
        it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferential`
        Keys are like for normal differentials, i.e., 's' for a first
        derivative in time, etc.  If an item is set to `None`, it will be
        guessed from the base class.

    Returns
    -------
    repr_classes : dict of subclasses
        The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
    """
    base = _get_repr_cls(base)
    repr_classes = {'base': base}

    for name, differential_type in differentials.items():
        if differential_type == 'base':
            # We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None) elif differential_type in r.DIFFERENTIAL_CLASSES: differential_type = r.DIFFERENTIAL_CLASSES[differential_type] elif (differential_type is not None and (not isinstance(differential_type, type) or not issubclass(differential_type, r.BaseDifferential))): raise ValueError( 'Differential is {!r} but must be a BaseDifferential class ' 'or one of the string aliases {}'.format( differential_type, list(r.DIFFERENTIAL_CLASSES))) repr_classes[name] = differential_type return repr_classes _RepresentationMappingBase = \ namedtuple('RepresentationMapping', ('reprname', 'framename', 'defaultunit')) class RepresentationMapping(_RepresentationMappingBase): """ This `~collections.namedtuple` is used with the ``frame_specific_representation_info`` attribute to tell frames what attribute names (and default units) to use for a particular representation. ``reprname`` and ``framename`` should be strings, while ``defaultunit`` can be either an astropy unit, the string ``'recommended'`` (which is degrees for Angles, nothing otherwise), or None (to indicate that no unit mapping should be done). """ def __new__(cls, reprname, framename, defaultunit='recommended'): # this trick just provides some defaults return super().__new__(cls, reprname, framename, defaultunit) base_doc = """{__doc__} Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` subclass instance A representation object or ``None`` to have no data (or use the coordinate component arguments, see below). {components} representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional A representation class or string name of a representation class. This sets the expected input representation class, thereby changing the expected keyword arguments for the data passed in. For example, passing ``representation_type='cartesian'`` will make the classes expect position data with cartesian names, i.e. ``x, y, z`` in most cases unless overridden via ``frame_specific_representation_info``. To see this frame's names, check out ``<this frame>().representation_info``. differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional A differential class or dictionary of differential classes (currently only a velocity differential with key 's' is supported). This sets the expected input differential class, thereby changing the expected keyword arguments of the data passed in. For example, passing ``differential_type='cartesian'`` will make the classes expect velocity data with the argument names ``v_x, v_y, v_z`` unless overridden via ``frame_specific_representation_info``. To see this frame's names, check out ``<this frame>().representation_info``. copy : bool, optional If `True` (default), make copies of the input coordinate arrays. Can only be passed in as a keyword argument. {footer} """ _components = """ *args, **kwargs Coordinate components, with names that depend on the subclass. """ @format_doc(base_doc, components=_components, footer="") class BaseCoordinateFrame(ShapedLikeNDArray): """ The base class for coordinate frames. This class is intended to be subclassed to create instances of specific systems. Subclasses can implement the following attributes: * `default_representation` A subclass of `~astropy.coordinates.BaseRepresentation` that will be treated as the default representation of this frame. This is the representation assumed by default when the frame is created. 
* `default_differential` A subclass of `~astropy.coordinates.BaseDifferential` that will be treated as the default differential class of this frame. This is the differential class assumed by default when the frame is created. * `~astropy.coordinates.Attribute` class attributes Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined using a descriptor class. See the narrative documentation or built-in classes code for details. * `frame_specific_representation_info` A dictionary mapping the name or class of a representation to a list of `~astropy.coordinates.RepresentationMapping` objects that tell what names and default units should be used on this frame for the components of that representation. Unless overridden via `frame_specific_representation_info`, velocity name defaults are: * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential` proper motion components * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion components * ``radial_velocity`` for any ``d_distance`` component * ``v_{x,y,z}`` for `CartesianDifferential` velocity components where ``{lon}`` and ``{lat}`` are the frame names of the angular components. """ default_representation = None default_differential = None # Specifies special names and units for representation and differential # attributes. frame_specific_representation_info = {} frame_attributes = {} # Default empty frame_attributes dict def __init_subclass__(cls, **kwargs): # We first check for explicitly set values for these: default_repr = getattr(cls, 'default_representation', None) default_diff = getattr(cls, 'default_differential', None) repr_info = getattr(cls, 'frame_specific_representation_info', None) # Then, to make sure this works for subclasses-of-subclasses, we also # have to check for cases where the attribute names have already been # replaced by underscore-prefaced equivalents by the logic below: if default_repr is None or isinstance(default_repr, property): default_repr = getattr(cls, '_default_representation', None) if default_diff is None or isinstance(default_diff, property): default_diff = getattr(cls, '_default_differential', None) if repr_info is None or isinstance(repr_info, property): repr_info = getattr(cls, '_frame_specific_representation_info', None) repr_info = cls._infer_repr_info(repr_info) # Make read-only properties for the frame class attributes that should # be read-only to make them immutable after creation. # We copy attributes instead of linking to make sure there's no # accidental cross-talk between classes cls._create_readonly_property('default_representation', default_repr, 'Default representation for position data') cls._create_readonly_property('default_differential', default_diff, 'Default representation for differential data ' '(e.g., velocity)') cls._create_readonly_property('frame_specific_representation_info', copy.deepcopy(repr_info), 'Mapping for frame-specific component names') # Set the frame attributes. We first construct the attributes from # superclasses, going in reverse order to keep insertion order, # and then add any attributes from the frame now being defined # (if any old definitions are overridden, this keeps the order). # Note that we cannot simply start with the inherited frame_attributes # since we could be a mixin between multiple coordinate frames. # TODO: Should this be made to use readonly_prop_factory as well or # would it be inconvenient for getting the frame_attributes from # classes? 
frame_attrs = {} for basecls in reversed(cls.__bases__): if issubclass(basecls, BaseCoordinateFrame): frame_attrs.update(basecls.frame_attributes) for k, v in cls.__dict__.items(): if isinstance(v, Attribute): frame_attrs[k] = v cls.frame_attributes = frame_attrs # Deal with setting the name of the frame: if not hasattr(cls, 'name'): cls.name = cls.__name__.lower() elif (BaseCoordinateFrame not in cls.__bases__ and cls.name in [getattr(base, 'name', None) for base in cls.__bases__]): # This may be a subclass of a subclass of BaseCoordinateFrame, # like ICRS(BaseRADecFrame). In this case, cls.name will have been # set by init_subclass cls.name = cls.__name__.lower() # A cache that *must be unique to each frame class* - it is # insufficient to share them with superclasses, hence the need to put # them in the meta cls._frame_class_cache = {} super().__init_subclass__(**kwargs) def __init__(self, *args, copy=True, representation_type=None, differential_type=None, **kwargs): self._attr_names_with_defaults = [] self._representation = self._infer_representation(representation_type, differential_type) self._data = self._infer_data(args, copy, kwargs) # possibly None. # Set frame attributes, if any values = {} for fnm, fdefault in self.get_frame_attr_names().items(): # Read-only frame attributes are defined as FrameAttribute # descriptors which are not settable, so set 'real' attributes as # the name prefaced with an underscore. if fnm in kwargs: value = kwargs.pop(fnm) setattr(self, '_' + fnm, value) # Validate attribute by getting it. If the instance has data, # this also checks its shape is OK. If not, we do it below. values[fnm] = getattr(self, fnm) else: setattr(self, '_' + fnm, fdefault) self._attr_names_with_defaults.append(fnm) if kwargs: raise TypeError( f'Coordinate frame {self.__class__.__name__} got unexpected ' f'keywords: {list(kwargs)}') # We do ``is None`` because self._data might evaluate to false for # empty arrays or data == 0 if self._data is None: # No data: we still need to check that any non-scalar attributes # have consistent shapes. Collect them for all attributes with # size > 1 (which should be array-like and thus have a shape). shapes = {fnm: value.shape for fnm, value in values.items() if getattr(value, 'shape', ())} if shapes: if len(shapes) > 1: try: self._no_data_shape = check_broadcast(*shapes.values()) except ValueError as err: raise ValueError( f"non-scalar attributes with inconsistent shapes: {shapes}") from err # Above, we checked that it is possible to broadcast all # shapes. By getting and thus validating the attributes, # we verify that the attributes can in fact be broadcast. for fnm in shapes: getattr(self, fnm) else: self._no_data_shape = shapes.popitem()[1] else: self._no_data_shape = () # The logic of this block is not related to the previous one if self._data is not None: # This makes the cache keys backwards-compatible, but also adds # support for having differentials attached to the frame data # representation object. if 's' in self._data.differentials: # TODO: assumes a velocity unit differential key = (self._data.__class__.__name__, self._data.differentials['s'].__class__.__name__, False) else: key = (self._data.__class__.__name__, False) # Set up representation cache. 
self.cache['representation'][key] = self._data def _infer_representation(self, representation_type, differential_type): if representation_type is None and differential_type is None: return {'base': self.default_representation, 's': self.default_differential} if representation_type is None: representation_type = self.default_representation if (inspect.isclass(differential_type) and issubclass(differential_type, r.BaseDifferential)): # TODO: assumes the differential class is for the velocity # differential differential_type = {'s': differential_type} elif isinstance(differential_type, str): # TODO: assumes the differential class is for the velocity # differential diff_cls = r.DIFFERENTIAL_CLASSES[differential_type] differential_type = {'s': diff_cls} elif differential_type is None: if representation_type == self.default_representation: differential_type = {'s': self.default_differential} else: differential_type = {'s': 'base'} # see set_representation_cls() return _get_repr_classes(representation_type, **differential_type) def _infer_data(self, args, copy, kwargs): # if not set below, this is a frame with no data representation_data = None differential_data = None args = list(args) # need to be able to pop them if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or args[0] is None): representation_data = args.pop(0) # This can still be None if len(args) > 0: raise TypeError( 'Cannot create a frame with both a representation object ' 'and other positional arguments') if representation_data is not None: diffs = representation_data.differentials differential_data = diffs.get('s', None) if ((differential_data is None and len(diffs) > 0) or (differential_data is not None and len(diffs) > 1)): raise ValueError('Multiple differentials are associated ' 'with the representation object passed in ' 'to the frame initializer. Only a single ' 'velocity differential is supported. Got: ' '{}'.format(diffs)) else: representation_cls = self.get_representation_cls() # Get any representation data passed in to the frame initializer # using keyword or positional arguments for the component names repr_kwargs = {} for nmkw, nmrep in self.representation_component_names.items(): if len(args) > 0: # first gather up positional args repr_kwargs[nmrep] = args.pop(0) elif nmkw in kwargs: repr_kwargs[nmrep] = kwargs.pop(nmkw) # special-case the Spherical->UnitSpherical if no `distance` if repr_kwargs: # TODO: determine how to get rid of the part before the "try" - # currently removing it has a performance regression for # unitspherical because of the try-related overhead. # Also frames have no way to indicate what the "distance" is if repr_kwargs.get('distance', True) is None: del repr_kwargs['distance'] if (issubclass(representation_cls, r.SphericalRepresentation) and 'distance' not in repr_kwargs): representation_cls = representation_cls._unit_representation try: representation_data = representation_cls(copy=copy, **repr_kwargs) except TypeError as e: # this except clause is here to make the names of the # attributes more human-readable. Without this the names # come from the representation instead of the frame's # attribute names. 
try: representation_data = ( representation_cls._unit_representation( copy=copy, **repr_kwargs)) except Exception: msg = str(e) names = self.get_representation_component_names() for frame_name, repr_name in names.items(): msg = msg.replace(repr_name, frame_name) msg = msg.replace('__init__()', f'{self.__class__.__name__}()') e.args = (msg,) raise e # Now we handle the Differential data: # Get any differential data passed in to the frame initializer # using keyword or positional arguments for the component names differential_cls = self.get_representation_cls('s') diff_component_names = self.get_representation_component_names('s') diff_kwargs = {} for nmkw, nmrep in diff_component_names.items(): if len(args) > 0: # first gather up positional args diff_kwargs[nmrep] = args.pop(0) elif nmkw in kwargs: diff_kwargs[nmrep] = kwargs.pop(nmkw) if diff_kwargs: if (hasattr(differential_cls, '_unit_differential') and 'd_distance' not in diff_kwargs): differential_cls = differential_cls._unit_differential elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs: differential_cls = r.RadialDifferential try: differential_data = differential_cls(copy=copy, **diff_kwargs) except TypeError as e: # this except clause is here to make the names of the # attributes more human-readable. Without this the names # come from the representation instead of the frame's # attribute names. msg = str(e) names = self.get_representation_component_names('s') for frame_name, repr_name in names.items(): msg = msg.replace(repr_name, frame_name) msg = msg.replace('__init__()', f'{self.__class__.__name__}()') e.args = (msg,) raise if len(args) > 0: raise TypeError( '{}.__init__ had {} remaining unhandled arguments'.format( self.__class__.__name__, len(args))) if representation_data is None and differential_data is not None: raise ValueError("Cannot pass in differential component data " "without positional (representation) data.") if differential_data: # Check that differential data provided has units compatible # with time-derivative of representation data. # NOTE: there is no dimensionless time while lengths can be # dimensionless (u.dimensionless_unscaled). 
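        # Editor's note (illustrative, not from the original module): the
        # loop below enforces, e.g., that a CartesianRepresentation with
        # ``x`` in kpc may carry a CartesianDifferential with ``d_x`` in
        # km / s (convertible to kpc / s), whereas a ``d_x`` given in plain
        # km would trigger the ValueError constructed inside the loop.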
for comp in representation_data.components: if (diff_comp := f'd_{comp}') in differential_data.components: current_repr_unit = representation_data._units[comp] current_diff_unit = differential_data._units[diff_comp] expected_unit = current_repr_unit / u.s if not current_diff_unit.is_equivalent(expected_unit): for key, val in self.get_representation_component_names().items(): if val == comp: current_repr_name = key break for key, val in self.get_representation_component_names('s').items(): if val == diff_comp: current_diff_name = key break raise ValueError( f'{current_repr_name} has unit "{current_repr_unit}" with physical ' f'type "{current_repr_unit.physical_type}", but {current_diff_name} ' f'has incompatible unit "{current_diff_unit}" with physical type ' f'"{current_diff_unit.physical_type}" instead of the expected ' f'"{(expected_unit).physical_type}".') representation_data = representation_data.with_differentials({'s': differential_data}) return representation_data @classmethod def _infer_repr_info(cls, repr_info): # Unless overridden via `frame_specific_representation_info`, velocity # name defaults are (see also docstring for BaseCoordinateFrame): # * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for # `SphericalCosLatDifferential` proper motion components # * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper # motion components # * ``radial_velocity`` for any `d_distance` component # * ``v_{x,y,z}`` for `CartesianDifferential` velocity components # where `{lon}` and `{lat}` are the frame names of the angular # components. if repr_info is None: repr_info = {} # the tuple() call below is necessary because if it is not there, # the iteration proceeds in a difficult-to-predict manner in the # case that one of the class objects hash is such that it gets # revisited by the iteration. The tuple() call prevents this by # making the items iterated over fixed regardless of how the dict # changes for cls_or_name in tuple(repr_info.keys()): if isinstance(cls_or_name, str): # TODO: this provides a layer of backwards compatibility in # case the key is a string, but now we want explicit classes. 
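        # Editor's note (illustrative, not from the original module): a
        # legacy mapping keyed by name, e.g.
        #     {'spherical': [RepresentationMapping('lon', 'ra'), ...]},
        # is rewritten below to be keyed by the class itself,
        #     {SphericalRepresentation: [...]},
        # which is the form the rest of this method expects.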
_cls = _get_repr_cls(cls_or_name) repr_info[_cls] = repr_info.pop(cls_or_name) # The default spherical names are 'lon' and 'lat' repr_info.setdefault(r.SphericalRepresentation, [RepresentationMapping('lon', 'lon'), RepresentationMapping('lat', 'lat')]) sph_component_map = {m.reprname: m.framename for m in repr_info[r.SphericalRepresentation]} repr_info.setdefault(r.SphericalCosLatDifferential, [ RepresentationMapping( 'd_lon_coslat', 'pm_{lon}_cos{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_lat', 'pm_{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) ]) repr_info.setdefault(r.SphericalDifferential, [ RepresentationMapping('d_lon', 'pm_{lon}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_lat', 'pm_{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) ]) repr_info.setdefault(r.CartesianDifferential, [ RepresentationMapping('d_x', 'v_x', u.km/u.s), RepresentationMapping('d_y', 'v_y', u.km/u.s), RepresentationMapping('d_z', 'v_z', u.km/u.s)]) # Unit* classes should follow the same naming conventions # TODO: this adds some unnecessary mappings for the Unit classes, so # this could be cleaned up, but in practice doesn't seem to have any # negative side effects repr_info.setdefault(r.UnitSphericalRepresentation, repr_info[r.SphericalRepresentation]) repr_info.setdefault(r.UnitSphericalCosLatDifferential, repr_info[r.SphericalCosLatDifferential]) repr_info.setdefault(r.UnitSphericalDifferential, repr_info[r.SphericalDifferential]) return repr_info @classmethod def _create_readonly_property(cls, attr_name, value, doc=None): private_attr = '_' + attr_name def getter(self): return getattr(self, private_attr) setattr(cls, private_attr, value) setattr(cls, attr_name, property(getter, doc=doc)) @lazyproperty def cache(self): """ Cache for this frame, a dict. It stores anything that should be computed from the coordinate data (*not* from the frame attributes). This can be used in functions to store anything that might be expensive to compute but might be re-used by some other function. E.g.:: if 'user_data' in myframe.cache: data = myframe.cache['user_data'] else: myframe.cache['user_data'] = data = expensive_func(myframe.lat) If in-place modifications are made to the frame data, the cache should be cleared:: myframe.cache.clear() """ return defaultdict(dict) @property def data(self): """ The coordinate data for this object. If this frame has no data, a `ValueError` will be raised. Use `has_data` to check if data is present on this frame object. """ if self._data is None: raise ValueError('The frame object "{!r}" does not have ' 'associated data'.format(self)) return self._data @property def has_data(self): """ True if this frame has `data`, False otherwise. """ return self._data is not None @property def shape(self): return self.data.shape if self.has_data else self._no_data_shape # We have to override the ShapedLikeNDArray definitions, since our shape # does not have to be that of the data. def __len__(self): return len(self.data) def __bool__(self): return self.has_data and self.size > 0 @property def size(self): return self.data.size @property def isscalar(self): return self.has_data and self.data.isscalar @classmethod def get_frame_attr_names(cls): return {name: getattr(cls, name) for name in cls.frame_attributes} def get_representation_cls(self, which='base'): """The class used for part of this frame's data.
Parameters ---------- which : ('base', 's', `None`) The class of which part to return. 'base' means the class used to represent the coordinates; 's' the first derivative to time, i.e., the class representing the proper motion and/or radial velocity. If `None`, return a dict with both. Returns ------- representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`. """ if which is not None: return self._representation[which] else: return self._representation def set_representation_cls(self, base=None, s='base'): """Set representation and/or differential class for this frame's data. Parameters ---------- base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional The name or subclass to use to represent the coordinate data. s : `~astropy.coordinates.BaseDifferential` subclass, optional The differential subclass to use to represent any velocities, such as proper motion and radial velocity. If equal to 'base', which is the default, it will be inferred from the representation. If `None`, the representation will drop any differentials. """ if base is None: base = self._representation['base'] self._representation = _get_repr_classes(base=base, s=s) representation_type = property( fget=get_representation_cls, fset=set_representation_cls, doc="""The representation class used for this frame's data. This will be a subclass from `~astropy.coordinates.BaseRepresentation`. Can also be *set* using the string name of the representation. If you wish to set an explicit differential class (rather than have it be inferred), use the ``set_representation_cls`` method. """) @property def differential_type(self): """ The differential used for this frame's data. This will be a subclass from `~astropy.coordinates.BaseDifferential`. For simultaneous setting of representation and differentials, see the ``set_representation_cls`` method. """ return self.get_representation_cls('s') @differential_type.setter def differential_type(self, value): self.set_representation_cls(s=value) @classmethod def _get_representation_info(cls): # This exists as a class method only to support handling frame inputs # without units, which are deprecated and will be removed. This can be # moved into the representation_info property at that time. # Note that if so moved, the cache should be accessed as # self.__class__._frame_class_cache if cls._frame_class_cache.get('last_reprdiff_hash', None) != r.get_reprdiff_cls_hash(): repr_attrs = {} for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) + list(r.DIFFERENTIAL_CLASSES.values())): repr_attrs[repr_diff_cls] = {'names': [], 'units': []} for c, c_cls in repr_diff_cls.attr_classes.items(): repr_attrs[repr_diff_cls]['names'].append(c) rec_unit = u.deg if issubclass(c_cls, Angle) else None repr_attrs[repr_diff_cls]['units'].append(rec_unit) for repr_diff_cls, mappings in cls._frame_specific_representation_info.items(): # take the 'names' and 'units' tuples from repr_attrs, # and then use the RepresentationMapping objects # to update as needed for this frame.
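                # Editor's note (illustrative, hypothetical frame): a
                # RepresentationMapping('lon', 'ra', u.hourangle) would swap
                # the stored component name 'lon' for 'ra' in ``nms`` and
                # put u.hourangle into ``uns``; the sentinel string
                # 'recommended' leaves the default unit untouched.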
nms = repr_attrs[repr_diff_cls]['names'] uns = repr_attrs[repr_diff_cls]['units'] comptomap = dict([(m.reprname, m) for m in mappings]) for i, c in enumerate(repr_diff_cls.attr_classes.keys()): if c in comptomap: mapp = comptomap[c] nms[i] = mapp.framename # need the isinstance because otherwise if it's a unit it # will try to compare to the unit string representation if not (isinstance(mapp.defaultunit, str) and mapp.defaultunit == 'recommended'): uns[i] = mapp.defaultunit # else we just leave it as recommended_units says above # Convert to tuples so that this can't mess with frame internals repr_attrs[repr_diff_cls]['names'] = tuple(nms) repr_attrs[repr_diff_cls]['units'] = tuple(uns) cls._frame_class_cache['representation_info'] = repr_attrs cls._frame_class_cache['last_reprdiff_hash'] = r.get_reprdiff_cls_hash() return cls._frame_class_cache['representation_info'] @lazyproperty def representation_info(self): """ A dictionary with the information of what attribute names for this frame apply to particular representations. """ return self._get_representation_info() def get_representation_component_names(self, which='base'): out = {} repr_or_diff_cls = self.get_representation_cls(which) if repr_or_diff_cls is None: return out data_names = repr_or_diff_cls.attr_classes.keys() repr_names = self.representation_info[repr_or_diff_cls]['names'] for repr_name, data_name in zip(repr_names, data_names): out[repr_name] = data_name return out def get_representation_component_units(self, which='base'): out = {} repr_or_diff_cls = self.get_representation_cls(which) if repr_or_diff_cls is None: return out repr_attrs = self.representation_info[repr_or_diff_cls] repr_names = repr_attrs['names'] repr_units = repr_attrs['units'] for repr_name, repr_unit in zip(repr_names, repr_units): if repr_unit: out[repr_name] = repr_unit return out representation_component_names = property(get_representation_component_names) representation_component_units = property(get_representation_component_units) def _replicate(self, data, copy=False, **kwargs): """Base for replicating a frame, with possibly different attributes. Produces a new instance of the frame using the attributes of the old frame (unless overridden) and with the data given. Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` or None Data to use in the new frame instance. If `None`, it will be a data-less frame. copy : bool, optional Whether data and the attributes on the old frame should be copied (default), or passed on by reference. **kwargs Any attributes that should be overridden. """ # This is to provide a slightly nicer error message if the user tries # to use frame_obj.representation instead of frame_obj.data to get the # underlying representation object [e.g., #2890] if inspect.isclass(data): raise TypeError('Class passed as data instead of a representation ' 'instance. If you called frame.representation, this' ' returns the representation class. frame.data ' 'returns the instantiated object - you may want to ' ' use this instead.') if copy and data is not None: data = data.copy() for attr in self.get_frame_attr_names(): if (attr not in self._attr_names_with_defaults and attr not in kwargs): value = getattr(self, attr) if copy: value = value.copy() kwargs[attr] = value return self.__class__(data, copy=False, **kwargs) def replicate(self, copy=False, **kwargs): """ Return a replica of the frame, optionally with new frame attributes. 
The replica is a new frame object that has the same data as this frame object and with frame attributes overridden if they are provided as extra keyword arguments to this method. If ``copy`` is set to `True` then a copy of the internal arrays will be made. Otherwise the replica will use a reference to the original arrays when possible to save memory. The internal arrays are normally not changeable by the user so in most cases it should not be necessary to set ``copy`` to `True`. Parameters ---------- copy : bool, optional If True, the resulting object is a copy of the data. When False, references are used where possible. This rule also applies to the frame attributes. **kwargs Any additional keywords are treated as frame attributes to be set on the new frame object. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance Replica of this object, but possibly with new frame attributes. """ return self._replicate(self.data, copy=copy, **kwargs) def replicate_without_data(self, copy=False, **kwargs): """ Return a replica without data, optionally with new frame attributes. The replica is a new frame object without data but with the same frame attributes as this object, except where overridden by extra keyword arguments to this method. The ``copy`` keyword determines if the frame attributes are truly copied vs being references (which saves memory for cases where frame attributes are large). This method is essentially the converse of `realize_frame`. Parameters ---------- copy : bool, optional If True, the resulting object has copies of the frame attributes. When False, references are used where possible. **kwargs Any additional keywords are treated as frame attributes to be set on the new frame object. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance Replica of this object, but without data and possibly with new frame attributes. """ return self._replicate(None, copy=copy, **kwargs) def realize_frame(self, data, **kwargs): """ Generates a new frame with new data from another frame (which may or may not have data). Roughly speaking, the converse of `replicate_without_data`. Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` The representation to use as the data for the new frame. **kwargs Any additional keywords are treated as frame attributes to be set on the new frame object. In particular, `representation_type` can be specified. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance A new object in *this* frame, with the same frame attributes as this one, but with the ``data`` as the coordinate data. """ return self._replicate(data, **kwargs) def represent_as(self, base, s='base', in_frame_units=False): """ Generate and return a new representation of this frame's `data` as a Representation object. Note: In order to make an in-place change of the representation of a Frame or SkyCoord object, set the ``representation`` attribute of that object to the desired new representation, or use the ``set_representation_cls`` method to also set the differential. Parameters ---------- base : subclass of BaseRepresentation or string The type of representation to generate. Must be a *class* (not an instance), or the string name of the representation class. s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional Class in which any velocities should be represented. Must be a *class* (not an instance), or the string name of the differential class. If equal to 'base' (default), inferred from the base class. 
If `None`, all velocity information is dropped. in_frame_units : bool, keyword-only Force the representation units to match the specified units particular to this frame. Returns ------- newrep : BaseRepresentation-derived object A new representation object of this frame's `data`. Raises ------ AttributeError If this object had no `data` Examples -------- >>> from astropy import units as u >>> from astropy.coordinates import SkyCoord, CartesianRepresentation >>> coord = SkyCoord(0*u.deg, 0*u.deg) >>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP <CartesianRepresentation (x, y, z) [dimensionless] (1., 0., 0.)> >>> coord.representation_type = CartesianRepresentation >>> coord # doctest: +FLOAT_CMP <SkyCoord (ICRS): (x, y, z) [dimensionless] (1., 0., 0.)> """ # For backwards compatibility (because in_frame_units used to be the # 2nd positional argument), we check whether `s` is a boolean. If it # is, we interpret it as the `in_frame_units` value and warn about the # position change if isinstance(s, bool): warnings.warn("The argument position for `in_frame_units` in " "`represent_as` has changed. Use as a keyword " "argument if needed.", AstropyWarning) in_frame_units = s s = 'base' # In the future, we may want to support more differentials, in which # case one probably needs to define **kwargs above and use it here. # But for now, we only care about the velocity. repr_classes = _get_repr_classes(base=base, s=s) representation_cls = repr_classes['base'] # We only keep velocity information if 's' in self.data.differentials: # For the default 'base' option in which _get_repr_classes has # given us a best guess based on the representation class, we only # use it if the class we had already is incompatible. if (s == 'base' and (self.data.differentials['s'].__class__ in representation_cls._compatible_differentials)): differential_cls = self.data.differentials['s'].__class__ else: differential_cls = repr_classes['s'] elif s is None or s == 'base': differential_cls = None else: raise TypeError('Frame data has no associated differentials ' '(i.e. the frame has no velocity data) - ' 'represent_as() only accepts a new ' 'representation.') if differential_cls: cache_key = (representation_cls.__name__, differential_cls.__name__, in_frame_units) else: cache_key = (representation_cls.__name__, in_frame_units) cached_repr = self.cache['representation'].get(cache_key) if not cached_repr: if differential_cls: # Sanity check to ensure we do not just drop radial # velocity. TODO: should Representation.represent_as # allow this transformation in the first place? if (isinstance(self.data, r.UnitSphericalRepresentation) and issubclass(representation_cls, r.CartesianRepresentation) and not isinstance(self.data.differentials['s'], (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential, r.RadialDifferential))): raise u.UnitConversionError( 'need a distance to retrieve a cartesian representation ' 'when both radial velocity and proper motion are present, ' 'since otherwise the units cannot match.') # TODO NOTE: only supports a single differential data = self.data.represent_as(representation_cls, differential_cls) diff = data.differentials['s'] # TODO: assumes velocity else: data = self.data.represent_as(representation_cls) # If the new representation is known to this frame and has a defined # set of names and units, then use that.
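            # Editor's note (illustrative, not from the original module):
            # with in_frame_units=True, a freshly computed differential
            # component such as ``d_lon`` in rad / s is converted below to
            # the frame's declared default (e.g. mas / yr); with the default
            # in_frame_units=False, the representation's own units are kept.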
new_attrs = self.representation_info.get(representation_cls) if new_attrs and in_frame_units: datakwargs = dict((comp, getattr(data, comp)) for comp in data.components) for comp, new_attr_unit in zip(data.components, new_attrs['units']): if new_attr_unit: datakwargs[comp] = datakwargs[comp].to(new_attr_unit) data = data.__class__(copy=False, **datakwargs) if differential_cls: # the original differential data_diff = self.data.differentials['s'] # If the new differential is known to this frame and has a # defined set of names and units, then use that. new_attrs = self.representation_info.get(differential_cls) if new_attrs and in_frame_units: diffkwargs = dict((comp, getattr(diff, comp)) for comp in diff.components) for comp, new_attr_unit in zip(diff.components, new_attrs['units']): # Some special-casing to treat a situation where the # input data has a UnitSphericalDifferential or a # RadialDifferential. It is re-represented to the # frame's differential class (which might be, e.g., a # dimensional Differential), so we don't want to try to # convert the empty component units if (isinstance(data_diff, (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential)) and comp not in data_diff.__class__.attr_classes): continue elif (isinstance(data_diff, r.RadialDifferential) and comp not in data_diff.__class__.attr_classes): continue # Try to convert to requested units. Since that might # not be possible (e.g., for a coordinate with proper # motion but without distance, one cannot convert to a # cartesian differential in km/s), we allow the unit # conversion to fail. See gh-7028 for discussion. if new_attr_unit and hasattr(diff, comp): try: diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit) except Exception: pass diff = diff.__class__(copy=False, **diffkwargs) # Here we have to bypass using with_differentials() because # it has a validation check. But because # .representation_type and .differential_type don't point to # the original classes, if the input differential is a # RadialDifferential, it usually gets turned into a # SphericalCosLatDifferential (or whatever the default is) # with strange units for the d_lon and d_lat attributes. # This then causes the dictionary key check to fail (i.e. # comparison against `diff._get_deriv_key()`) data._differentials.update({'s': diff}) self.cache['representation'][cache_key] = data return self.cache['representation'][cache_key] def transform_to(self, new_frame): """ Transform this object's coordinate data to a new frame. Parameters ---------- new_frame : coordinate-like or `BaseCoordinateFrame` subclass instance The frame to transform this coordinate frame into. The frame class option is deprecated. Returns ------- transframe : coordinate-like A new object with the coordinate data represented in the ``newframe`` system. Raises ------ ValueError If there is no possible transformation route. """ from .errors import ConvertError if self._data is None: raise ValueError('Cannot transform a frame with no data') if (getattr(self.data, 'differentials', None) and hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and np.any(self.obstime != new_frame.obstime)): raise NotImplementedError('You cannot transform a frame that has ' 'velocities to another frame at a ' 'different obstime. 
If you think this ' 'should (or should not) be possible, ' 'please comment at https://github.com/astropy/astropy/issues/6280') if inspect.isclass(new_frame): warnings.warn("Transforming a frame instance to a frame class (as opposed to another " "frame instance) will not be supported in the future. Either " "explicitly instantiate the target frame, or first convert the source " "frame instance to an `astropy.coordinates.SkyCoord` and use its " "`transform_to()` method.", AstropyDeprecationWarning) # Use the default frame attributes for this class new_frame = new_frame() if hasattr(new_frame, '_sky_coord_frame'): # Input new_frame is not a frame instance or class and is most # likely a SkyCoord object. new_frame = new_frame._sky_coord_frame trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__) if trans is None: if new_frame.__class__ is self.__class__: # no special transform needed, but should update frame info return new_frame.realize_frame(self.data) msg = 'Cannot transform from {0} to {1}' raise ConvertError(msg.format(self.__class__, new_frame.__class__)) return trans(self, new_frame) def is_transformable_to(self, new_frame): """ Determines if this coordinate frame can be transformed to another given frame. Parameters ---------- new_frame : `BaseCoordinateFrame` subclass or instance The proposed frame to transform into. Returns ------- transformable : bool or str `True` if this can be transformed to ``new_frame``, `False` if not, or the string 'same' if ``new_frame`` is the same system as this object but no transformation is defined. Notes ----- A return value of 'same' means the transformation will work, but it will just give back a copy of this object. The intended usage is:: if coord.is_transformable_to(some_unknown_frame): coord2 = coord.transform_to(some_unknown_frame) This will work even if ``some_unknown_frame`` turns out to be the same frame class as ``coord``. This is intended for cases where the frame is the same regardless of the frame attributes (e.g. ICRS), but be aware that it *might* also indicate that someone forgot to define the transformation between two objects of the same frame class but with different attributes. """ new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__ trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls) if trans is None: if new_frame_cls is self.__class__: return 'same' else: return False else: return True def is_frame_attr_default(self, attrnm): """ Determine whether or not a frame attribute has its value because it's the default value, or because this frame was created with that value explicitly requested. Parameters ---------- attrnm : str The name of the attribute to check. Returns ------- isdefault : bool True if the attribute ``attrnm`` has its value by default, False if it was specified at creation of this frame. """ return attrnm in self._attr_names_with_defaults @staticmethod def _frameattr_equiv(left_fattr, right_fattr): """ Determine if two frame attributes are equivalent. Implemented as a staticmethod mainly as a convenient location, although conceivably it might be desirable for subclasses to override this behavior. Primary purpose is to check for equality of representations. This aspect can actually be simplified/removed now that representations have equality defined. Secondary purpose is to check for equality of coordinate attributes, which first checks whether they themselves are in equivalent frames before checking for equality in the normal fashion.
This is because checking for equality with non-equivalent frames raises an error. """ if left_fattr is right_fattr: # shortcut if it's exactly the same object return True elif left_fattr is None or right_fattr is None: # shortcut if one attribute is unspecified and the other isn't return False left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential) right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential) if left_is_repr and right_is_repr: # both are representations. if (getattr(left_fattr, 'differentials', False) or getattr(right_fattr, 'differentials', False)): warnings.warn('Two representation frame attributes were ' 'checked for equivalence when at least one of' ' them has differentials. This yields False ' 'even if the underlying representations are ' 'equivalent (although this may change in ' 'future versions of Astropy)', AstropyWarning) return False if isinstance(right_fattr, left_fattr.__class__): # if same representation type, compare components. return np.all([(getattr(left_fattr, comp) == getattr(right_fattr, comp)) for comp in left_fattr.components]) else: # convert to cartesian and see if they match return np.all(left_fattr.to_cartesian().xyz == right_fattr.to_cartesian().xyz) elif left_is_repr or right_is_repr: return False left_is_coord = isinstance(left_fattr, BaseCoordinateFrame) right_is_coord = isinstance(right_fattr, BaseCoordinateFrame) if left_is_coord and right_is_coord: # both are coordinates if left_fattr.is_equivalent_frame(right_fattr): return np.all(left_fattr == right_fattr) else: return False elif left_is_coord or right_is_coord: return False return np.all(left_fattr == right_fattr) def is_equivalent_frame(self, other): """ Checks if this object is the same frame as the ``other`` object. To be the same frame, two objects must be the same frame class and have the same frame attributes. Note that it does *not* matter what, if any, data either object has. Parameters ---------- other : :class:`~astropy.coordinates.BaseCoordinateFrame` the other frame to check Returns ------- isequiv : bool True if the frames are the same, False if not. Raises ------ TypeError If ``other`` isn't a `BaseCoordinateFrame` or subclass. 
""" if self.__class__ == other.__class__: for frame_attr_name in self.get_frame_attr_names(): if not self._frameattr_equiv(getattr(self, frame_attr_name), getattr(other, frame_attr_name)): return False return True elif not isinstance(other, BaseCoordinateFrame): raise TypeError("Tried to do is_equivalent_frame on something that " "isn't a frame") else: return False def __repr__(self): frameattrs = self._frame_attrs_repr() data_repr = self._data_repr() if frameattrs: frameattrs = f' ({frameattrs})' if data_repr: return f'<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>' else: return f'<{self.__class__.__name__} Frame{frameattrs}>' def _data_repr(self): """Returns a string representation of the coordinate data.""" if not self.has_data: return '' if self.representation_type: if (hasattr(self.representation_type, '_unit_representation') and isinstance(self.data, self.representation_type._unit_representation)): rep_cls = self.data.__class__ else: rep_cls = self.representation_type if 's' in self.data.differentials: dif_cls = self.get_representation_cls('s') dif_data = self.data.differentials['s'] if isinstance(dif_data, (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential, r.RadialDifferential)): dif_cls = dif_data.__class__ else: dif_cls = None data = self.represent_as(rep_cls, dif_cls, in_frame_units=True) data_repr = repr(data) # Generate the list of component names out of the repr string part1, _, remainder = data_repr.partition('(') if remainder != '': comp_str, _, part2 = remainder.partition(')') comp_names = comp_str.split(', ') # Swap in frame-specific component names invnames = dict([(nmrepr, nmpref) for nmpref, nmrepr in self.representation_component_names.items()]) for i, name in enumerate(comp_names): comp_names[i] = invnames.get(name, name) # Reassemble the repr string data_repr = part1 + '(' + ', '.join(comp_names) + ')' + part2 else: data = self.data data_repr = repr(self.data) if data_repr.startswith('<' + data.__class__.__name__): # remove both the leading "<" and the space after the name, as well # as the trailing ">" data_repr = data_repr[(len(data.__class__.__name__) + 2):-1] else: data_repr = 'Data:\n' + data_repr if 's' in self.data.differentials: data_repr_spl = data_repr.split('\n') if 'has differentials' in data_repr_spl[-1]: diffrepr = repr(data.differentials['s']).split('\n') if diffrepr[0].startswith('<'): diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:]) for frm_nm, rep_nm in self.get_representation_component_names('s').items(): diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm) if diffrepr[-1].endswith('>'): diffrepr[-1] = diffrepr[-1][:-1] data_repr_spl[-1] = '\n'.join(diffrepr) data_repr = '\n'.join(data_repr_spl) return data_repr def _frame_attrs_repr(self): """ Returns a string representation of the frame's attributes, if any. """ attr_strs = [] for attribute_name in self.get_frame_attr_names(): attr = getattr(self, attribute_name) # Check to see if this object has a way of representing itself # specific to being an attribute of a frame. (Note, this is not the # Attribute class, it's the actual object). if hasattr(attr, "_astropy_repr_in_frame"): attrstr = attr._astropy_repr_in_frame() else: attrstr = str(attr) attr_strs.append(f"{attribute_name}={attrstr}") return ', '.join(attr_strs) def _apply(self, method, *args, **kwargs): """Create a new instance, applying a method to the underlying data. 
In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays in the representation (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), as well as to any frame attributes that have a shape, with the results used to create a new instance. Internally, it is also used to apply functions to the above parts (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args : tuple Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. """ def apply_method(value): if isinstance(value, ShapedLikeNDArray): return value._apply(method, *args, **kwargs) else: if callable(method): return method(value, *args, **kwargs) else: return getattr(value, method)(*args, **kwargs) new = super().__new__(self.__class__) if hasattr(self, '_representation'): new._representation = self._representation.copy() new._attr_names_with_defaults = self._attr_names_with_defaults.copy() for attr in self.frame_attributes: _attr = '_' + attr if attr in self._attr_names_with_defaults: setattr(new, _attr, getattr(self, _attr)) else: value = getattr(self, _attr) if getattr(value, 'shape', ()): value = apply_method(value) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. value = copy.copy(value) setattr(new, _attr, value) if self.has_data: new._data = apply_method(self.data) else: new._data = None shapes = [getattr(new, '_' + attr).shape for attr in new.frame_attributes if (attr not in new._attr_names_with_defaults and getattr(getattr(new, '_' + attr), 'shape', ()))] if shapes: new._no_data_shape = (check_broadcast(*shapes) if len(shapes) > 1 else shapes[0]) else: new._no_data_shape = () return new def __setitem__(self, item, value): if self.__class__ is not value.__class__: raise TypeError(f'can only set from object of same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') if not self.is_equivalent_frame(value): raise ValueError('can only set frame item from an equivalent frame') if value._data is None: raise ValueError('can only set frame with value that has data') if self._data is None: raise ValueError('cannot set frame which has no data') if self.shape == (): raise TypeError(f"scalar '{self.__class__.__name__}' frame object " f"does not support item assignment") if self._data.__class__ is not value._data.__class__: raise TypeError(f'can only set from object of same class: ' f'{self._data.__class__.__name__} vs. ' f'{value._data.__class__.__name__}') if self._data._differentials: # Can this ever occur? (Same class but different differential keys). # This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys(): raise ValueError('setitem value must have same differentials') for key, self_diff in self._data._differentials.items(): if self_diff.__class__ is not value._data._differentials[key].__class__: raise TypeError(f'can only set from object of same class: ' f'{self_diff.__class__.__name__} vs. ' f'{value._data._differentials[key].__class__.__name__}') # Set representation data self._data[item] = value._data # Frame attributes required to be identical by is_equivalent_frame, # no need to set them here. self.cache.clear() @override__dir__ def __dir__(self): """ Override the builtin `dir` behavior to include representation names. TODO: dynamic representation transforms (i.e. include cylindrical et al.). """ dir_values = set(self.representation_component_names) dir_values |= set(self.get_representation_component_names('s')) return dir_values def __getattr__(self, attr): """ Allow access to attributes on the representation and differential as found via ``self.get_representation_component_names``. TODO: We should handle dynamic representation transforms here (e.g., `.cylindrical`) instead of defining properties as below. """ # attr == '_representation' is likely from the hasattr() test in the # representation property which is used for # self.representation_component_names. # # Prevent infinite recursion here. if attr.startswith('_'): return self.__getattribute__(attr) # Raise AttributeError. repr_names = self.representation_component_names if attr in repr_names: if self._data is None: self.data # this raises the "no data" error by design - doing it # this way means we don't have to replicate the error message here rep = self.represent_as(self.representation_type, in_frame_units=True) val = getattr(rep, repr_names[attr]) return val diff_names = self.get_representation_component_names('s') if attr in diff_names: if self._data is None: self.data # see above. # TODO: this doesn't work for the case when there is only # unitspherical information. The differential_type gets set to the # default_differential, which expects full information, so the # units don't work out rep = self.represent_as(in_frame_units=True, **self.get_representation_cls(None)) val = getattr(rep.differentials['s'], diff_names[attr]) return val return self.__getattribute__(attr) # Raise AttributeError. def __setattr__(self, attr, value): # Don't slow down access of private attributes! if not attr.startswith('_'): if hasattr(self, 'representation_info'): repr_attr_names = set() for representation_attr in self.representation_info.values(): repr_attr_names.update(representation_attr['names']) if attr in repr_attr_names: raise AttributeError( f'Cannot set any frame attribute {attr}') super().__setattr__(attr, value) def __eq__(self, value): """Equality operator for frame. This implements strict equality and requires that the frames are equivalent and that the representation data are exactly equal. """ is_equiv = self.is_equivalent_frame(value) if self._data is None and value._data is None: # For Frame with no data, == compare is same as is_equivalent_frame() return is_equiv if not is_equiv: raise TypeError(f'cannot compare: objects must have equivalent frames: ' f'{self.replicate_without_data()} vs. 
' f'{value.replicate_without_data()}') if ((value._data is None and self._data is not None) or (self._data is None and value._data is not None)): raise ValueError('cannot compare: one frame has data and the other ' 'does not') return self._data == value._data def __ne__(self, value): return np.logical_not(self == value) def separation(self, other): """ Computes the on-sky separation between this coordinate and another. .. note:: If the ``other`` coordinate object is in a different frame, it is first transformed to the frame of this object. This can lead to unintuitive behavior if not accounted for. Particularly of note is that ``self.separation(other)`` and ``other.separation(self)`` may not give the same answer in this case. Parameters ---------- other : `~astropy.coordinates.BaseCoordinateFrame` The coordinate to get the separation to. Returns ------- sep : `~astropy.coordinates.Angle` The on-sky separation between this and the ``other`` coordinate. Notes ----- The separation is calculated using the Vincenty formula, which is stable at all locations, including poles and antipodes [1]_. .. [1] https://en.wikipedia.org/wiki/Great-circle_distance """ from .angle_utilities import angular_separation from .angles import Angle self_unit_sph = self.represent_as(r.UnitSphericalRepresentation) other_transformed = other.transform_to(self) other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation) # Get the separation as a Quantity, convert to Angle in degrees sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat, other_unit_sph.lon, other_unit_sph.lat) return Angle(sep, unit=u.degree) def separation_3d(self, other): """ Computes the three-dimensional separation between this coordinate and another. Parameters ---------- other : `~astropy.coordinates.BaseCoordinateFrame` The coordinate system to get the distance to. Returns ------- sep : `~astropy.coordinates.Distance` The real-space distance between these two coordinates. Raises ------ ValueError If this or the other coordinate does not have distances. """ from .distances import Distance if issubclass(self.data.__class__, r.UnitSphericalRepresentation): raise ValueError('This object does not have a distance; cannot ' 'compute 3d separation.') # do this first just in case the conversion somehow creates a distance other_in_self_system = other.transform_to(self) if issubclass(other_in_self_system.data.__class__, r.UnitSphericalRepresentation): raise ValueError('The other object does not have a distance; ' 'cannot compute 3d separation.') # drop the differentials to ensure they don't do anything odd in the # subtraction self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation) other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation) dist = (self_car - other_car).norm() if dist.unit == u.one: return dist else: return Distance(dist) @property def cartesian(self): """ Shorthand for a cartesian representation of the coordinates in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('cartesian', in_frame_units=True) @property def cylindrical(self): """ Shorthand for a cylindrical representation of the coordinates in this object.
""" # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('cylindrical', in_frame_units=True) @property def spherical(self): """ Shorthand for a spherical representation of the coordinates in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('spherical', in_frame_units=True) @property def sphericalcoslat(self): """ Shorthand for a spherical representation of the positional data and a `SphericalCosLatDifferential` for the velocity data in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('spherical', 'sphericalcoslat', in_frame_units=True) @property def velocity(self): """ Shorthand for retrieving the Cartesian space-motion as a `CartesianDifferential` object. This is equivalent to calling ``self.cartesian.differentials['s']``. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') return self.cartesian.differentials['s'] @property def proper_motion(self): """ Shorthand for the two-dimensional proper motion as a `~astropy.units.Quantity` object with angular velocity units. In the returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude dimension so that ``.proper_motion[0]`` is the longitudinal proper motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper motion already includes the cos(latitude) term. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') sph = self.represent_as('spherical', 'sphericalcoslat', in_frame_units=True) pm_lon = sph.differentials['s'].d_lon_coslat pm_lat = sph.differentials['s'].d_lat return np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit @property def radial_velocity(self): """ Shorthand for the radial or line-of-sight velocity as a `~astropy.units.Quantity` object. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') sph = self.represent_as('spherical', in_frame_units=True) return sph.differentials['s'].d_distance class GenericFrame(BaseCoordinateFrame): """ A frame object that can't store data but can hold any arbitrary frame attributes. Mostly useful as a utility for the high-level class to store intermediate frame attributes. Parameters ---------- frame_attrs : dict A dictionary of attributes to be used as the frame attributes for this frame. """ name = None # it's not a "real" frame so it doesn't have a name def __init__(self, frame_attrs): self.frame_attributes = {} for name, default in frame_attrs.items(): self.frame_attributes[name] = Attribute(default) setattr(self, '_' + name, default) super().__init__(None) def __getattr__(self, name): if '_' + name in self.__dict__: return getattr(self, '_' + name) else: raise AttributeError(f'no {name}') def __setattr__(self, name, value): if name in self.get_frame_attr_names(): raise AttributeError(f"can't set frame attribute '{name}'") else: super().__setattr__(name, value)
dcc38bcd2c9a08806adc2e0d2c0da0c2456bd35d1d2100e581de4be94c43ade3
# Licensed under a 3-clause BSD style license - see LICENSE.rst from warnings import warn import collections import socket import json import urllib.request import urllib.error import urllib.parse import numpy as np import erfa from astropy import units as u from astropy import constants as consts from astropy.units.quantity import QuantityInfoBase from astropy.utils import data from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyUserWarning from .angles import Angle, Longitude, Latitude from .representation import (BaseRepresentation, CartesianRepresentation, CartesianDifferential) from .matrix_utilities import matrix_transpose from .errors import UnknownSiteException __all__ = ['EarthLocation', 'BaseGeodeticRepresentation', 'WGS84GeodeticRepresentation', 'WGS72GeodeticRepresentation', 'GRS80GeodeticRepresentation'] GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height']) ELLIPSOIDS = {} """Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).""" # Note: they get filled by the creation of the geodetic classes. OMEGA_EARTH = ((1.002_737_811_911_354_48 * u.cycle/u.day) .to(1/u.s, u.dimensionless_angles())) """ Rotational velocity of Earth, following SOFA's pvtob. In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value in SI seconds, so multiply by the ratio of stellar to solar day. See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992), University Science Books. The constant is the conventional, exact one (IERS conventions 2003); see http://hpiers.obspm.fr/eop-pc/index.php?index=constants. """ def _check_ellipsoid(ellipsoid=None, default='WGS84'): if ellipsoid is None: ellipsoid = default if ellipsoid not in ELLIPSOIDS: raise ValueError(f'Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})') return ellipsoid def _get_json_result(url, err_str, use_google): # need to do this here to prevent a series of complicated circular imports from .name_resolve import NameResolveError try: # Retrieve JSON response from Google maps API resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout) resp_data = json.loads(resp.read().decode('utf8')) except urllib.error.URLError as e: # This catches a timeout error, see: # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python if isinstance(e.reason, socket.timeout): raise NameResolveError(err_str.format(msg="connection timed out")) from e else: raise NameResolveError(err_str.format(msg=e.reason)) from e except socket.timeout: # There are some cases where urllib2 does not catch socket.timeout # especially while receiving response data on an already previously # working request raise NameResolveError(err_str.format(msg="connection timed out")) if use_google: results = resp_data.get('results', []) if resp_data.get('status', None) != 'OK': raise NameResolveError(err_str.format(msg="unknown failure with " "Google API")) else: # OpenStreetMap returns a list results = resp_data if not results: raise NameResolveError(err_str.format(msg="no results returned")) return results class EarthLocationInfo(QuantityInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid') def _construct_from_dict(self, map): # Need to pop ellipsoid off and update post-instantiation. 
This is # on the to-fix list in #4261. ellipsoid = map.pop('ellipsoid') out = self._parent_cls(**map) out.ellipsoid = ellipsoid return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new EarthLocation instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : EarthLocation (or subclass) Empty instance of this class consistent with ``cols`` """ # Very similar to QuantityInfo.new_like, but the creation of the # map is different enough that this needs its own routine. # Get merged info attributes shape, dtype, format, description. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'format', 'description')) # The above raises an error if the dtypes do not match, but returns # just the string representation, which is not useful, so remove. attrs.pop('dtype') # Make empty EarthLocation using the dtype and unit of the last column. # Use zeros so we do not get problems for possible conversion to # geodetic coordinates. shape = (length,) + attrs.pop('shape') data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False) # Get arguments needed to reconstruct class map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key)) for key in self._represent_as_dict_attrs} out = self._construct_from_dict(map) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class EarthLocation(u.Quantity): """ Location on the Earth. Initialization is first attempted assuming geocentric (x, y, z) coordinates are given; if that fails, another attempt is made assuming geodetic coordinates (longitude, latitude, height above a reference ellipsoid). When using the geodetic forms, longitudes are measured increasing to the east, so west longitudes are negative. Internally, the coordinates are stored as geocentric. To ensure a specific type of coordinates is used, use the corresponding class methods (`from_geocentric` and `from_geodetic`) or initialize the arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``, ``height`` for geodetic). See the class methods for details. Notes ----- This class fits into the coordinates transformation framework in that it encodes a position on the `~astropy.coordinates.ITRS` frame. To get a proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs`` property. """ _ellipsoid = 'WGS84' _location_dtype = np.dtype({'names': ['x', 'y', 'z'], 'formats': [np.float64]*3}) _array_dtype = np.dtype((np.float64, (3,))) info = EarthLocationInfo() def __new__(cls, *args, **kwargs): # TODO: needs copy argument and better dealing with inputs.
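        # Editor's note (illustrative, not from the original module): the
        # two construction paths tried below mean that, e.g.,
        #     >>> EarthLocation(1*u.m, 2*u.m, 3*u.m)      # geocentric x, y, z
        #     >>> EarthLocation('10d', '45d', 100*u.m)    # geodetic lon, lat, height
        # both succeed; geodetic parsing is attempted only after the
        # geocentric attempt raises a UnitsError or TypeError.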
if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation)): return args[0].copy() try: self = cls.from_geocentric(*args, **kwargs) except (u.UnitsError, TypeError) as exc_geocentric: try: self = cls.from_geodetic(*args, **kwargs) except Exception as exc_geodetic: raise TypeError('Coordinates could not be parsed as either ' 'geocentric or geodetic, with respective ' 'exceptions "{}" and "{}"' .format(exc_geocentric, exc_geodetic)) return self @classmethod def from_geocentric(cls, x, y, z, unit=None): """ Location on Earth, initialized from geocentric coordinates. Parameters ---------- x, y, z : `~astropy.units.Quantity` or array-like Cartesian coordinates. If not quantities, ``unit`` should be given. unit : unit-like or None Physical unit of the coordinate values. If ``x``, ``y``, and/or ``z`` are quantities, they will be converted to this unit. Raises ------ astropy.units.UnitsError If the units on ``x``, ``y``, and ``z`` do not match or an invalid unit is given. ValueError If the shapes of ``x``, ``y``, and ``z`` do not match. TypeError If ``x`` is not a `~astropy.units.Quantity` and no unit is given. """ if unit is None: try: unit = x.unit except AttributeError: raise TypeError("Geocentric coordinates should be Quantities " "unless an explicit unit is given.") from None else: unit = u.Unit(unit) if unit.physical_type != 'length': raise u.UnitsError("Geocentric coordinates should be in " "units of length.") try: x = u.Quantity(x, unit, copy=False) y = u.Quantity(y, unit, copy=False) z = u.Quantity(z, unit, copy=False) except u.UnitsError: raise u.UnitsError("Geocentric coordinate units should all be " "consistent.") x, y, z = np.broadcast_arrays(x, y, z) struc = np.empty(x.shape, cls._location_dtype) struc['x'], struc['y'], struc['z'] = x, y, z return super().__new__(cls, struc, unit, copy=False) @classmethod def from_geodetic(cls, lon, lat, height=0., ellipsoid=None): """ Location on Earth, initialized from geodetic coordinates. Parameters ---------- lon : `~astropy.coordinates.Longitude` or float Earth East longitude. Can be anything that initialises an `~astropy.coordinates.Angle` object (if float, in degrees). lat : `~astropy.coordinates.Latitude` or float Earth latitude. Can be anything that initialises an `~astropy.coordinates.Latitude` object (if float, in degrees). height : `~astropy.units.Quantity` ['length'] or float, optional Height above reference ellipsoid (if float, in meters; default: 0). ellipsoid : str, optional Name of the reference ellipsoid to use (default: 'WGS84'). Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'. Raises ------ astropy.units.UnitsError If the units on ``lon`` and ``lat`` are inconsistent with angular ones, or that on ``height`` with a length. ValueError If ``lon``, ``lat``, and ``height`` do not have the same shape, or if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geocentric coordinates, the ERFA routine ``gd2gc`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid) # As wrapping fails on readonly input, we do so manually lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree) lat = Latitude(lat, u.degree, copy=False) # don't convert to m by default, so we can use the height unit below. if not isinstance(height, u.Quantity): height = u.Quantity(height, u.m, copy=False) # get geocentric coordinates. 
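        # Editor's note (illustrative, not from the original module): at the
        # poles this conversion gives x == y == 0 and |z| equal to the polar
        # radius of the chosen ellipsoid plus ``height`` (roughly 6356.752 km
        # for WGS84), since the geodetic-to-geocentric transform is analytic.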
        geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
        xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
        self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
        self._ellipsoid = ellipsoid
        return self

    @classmethod
    def of_site(cls, site_name):
        """
        Return an object of this class for a known observatory/site by name.

        This is intended as a quick convenience function to get basic site
        information, not a fully-featured exhaustive registry of observatories
        and all their properties.

        Additional information about the site is stored in the ``.info.meta``
        dictionary of sites obtained using this method (see the examples below).

        .. note::
            When this function is called, it will attempt to download site
            information from the astropy data server.  If you would like a site
            to be added, issue a pull request to the
            `astropy-data repository <https://github.com/astropy/astropy-data>`_ .
            If a site cannot be found in the registry (i.e., an internet
            connection is not available), it will fall back on a built-in list.
            In the future, this bundled list might include a version-controlled
            list of canonical observatories extracted from the online version,
            but it currently only contains the Royal Greenwich Observatory as
            an example case.

        Parameters
        ----------
        site_name : str
            Name of the observatory (case-insensitive).

        Returns
        -------
        site : `~astropy.coordinates.EarthLocation` (or subclass) instance
            The location of the observatory. The returned class will be the same
            as this class.

        Examples
        --------
        >>> from astropy.coordinates import EarthLocation
        >>> keck = EarthLocation.of_site('Keck Observatory')  # doctest: +REMOTE_DATA
        >>> keck.geodetic  # doctest: +REMOTE_DATA +FLOAT_CMP
        GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
        >>> keck.info  # doctest: +REMOTE_DATA
        name = W. M. Keck Observatory
        dtype = void192
        unit = m
        class = EarthLocation
        n_bad = 0
        >>> keck.info.meta  # doctest: +REMOTE_DATA
        {'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}

        See Also
        --------
        get_site_names : the list of sites that this function can access
        """  # noqa
        registry = cls._get_site_registry()
        try:
            el = registry[site_name]
        except UnknownSiteException as e:
            raise UnknownSiteException(e.site, 'EarthLocation.get_site_names',
                                       close_names=e.close_names) from e

        if cls is el.__class__:
            return el
        else:
            newel = cls.from_geodetic(*el.to_geodetic())
            newel.info.name = el.info.name
            return newel

    @classmethod
    def of_address(cls, address, get_height=False, google_api_key=None):
        """
        Return an object of this class for a given address by querying either
        the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
        API [2]_, which requires a specified API key.

        This is intended as a quick convenience function to get easy access to
        locations. If you need to specify a precise location, you should use the
        initializer directly and pass in a longitude, latitude, and elevation.

        In the background, this just issues a web query to either of
        the APIs noted above. This is not meant to be abused! Both
        OpenStreetMap and Google use IP-based query limiting and will ban your
        IP if you send more than a few thousand queries per hour [2]_.

        .. warning::
            If the query returns more than one location (e.g., searching on
            ``address='springfield'``), this function will use the **first**
            returned location.

        Parameters
        ----------
        address : str
            The address to get the location for.
As per the Google maps API, this can be a fully specified street
            address (e.g., 123 Main St., New York, NY) or a city name (e.g.,
            Danbury, CT), etc.
        get_height : bool, optional
            This only works when using the Google API! See the
            ``google_api_key`` block below. Use the retrieved location to
            perform a second query to the Google maps elevation API to
            retrieve the height of the input address [3]_.
        google_api_key : str, optional
            A Google API key with the Geocoding API and (optionally) the
            elevation API enabled. See [4]_ for more information.

        Returns
        -------
        location : `~astropy.coordinates.EarthLocation` (or subclass) instance
            The location of the input address. The returned type will be the
            same as this class.

        References
        ----------
        .. [1] https://nominatim.openstreetmap.org/
        .. [2] https://developers.google.com/maps/documentation/geocoding/start
        .. [3] https://developers.google.com/maps/documentation/elevation/start
        .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
        """
        use_google = google_api_key is not None

        # Fail fast if invalid options are passed:
        if not use_google and get_height:
            raise ValueError(
                'Currently, `get_height` only works when using '
                'the Google geocoding API, which requires passing '
                'a Google API key with `google_api_key`. See: '
                'https://developers.google.com/maps/documentation/geocoding/get-api-key '
                'for information on obtaining an API key.')

        if use_google:  # Google
            pars = urllib.parse.urlencode({'address': address,
                                           'key': google_api_key})
            geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
        else:  # OpenStreetMap
            pars = urllib.parse.urlencode({'q': address,
                                           'format': 'json'})
            geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"

        # get longitude and latitude location
        err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
        geo_result = _get_json_result(geo_url, err_str=err_str,
                                      use_google=use_google)

        if use_google:
            loc = geo_result[0]['geometry']['location']
            lat = loc['lat']
            lon = loc['lng']
        else:
            loc = geo_result[0]
            lat = float(loc['lat'])  # strings are returned by OpenStreetMap
            lon = float(loc['lon'])

        if get_height:
            pars = {'locations': f'{lat:.8f},{lon:.8f}',
                    'key': google_api_key}
            pars = urllib.parse.urlencode(pars)
            ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"

            err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
            ele_result = _get_json_result(ele_url, err_str=err_str,
                                          use_google=use_google)
            height = ele_result[0]['elevation']*u.meter

        else:
            height = 0.

        return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height)

    @classmethod
    def get_site_names(cls):
        """
        Get list of names of observatories for use with
        `~astropy.coordinates.EarthLocation.of_site`.

        .. note::
            When this function is called, it will first attempt to
            download site information from the astropy data server. If it
            cannot (i.e., an internet connection is not available), it will fall
            back on the list included with astropy (which is a limited and dated
            set of sites).  If you think a site should be added, issue a pull
            request to the
            `astropy-data repository <https://github.com/astropy/astropy-data>`_ .

        Returns
        -------
        names : list of str
            List of valid observatory names

        See Also
        --------
        of_site : Gets the actual location object for one of the sites names
            this returns.
        """
        return cls._get_site_registry().names

    @classmethod
    def _get_site_registry(cls, force_download=False, force_builtin=False):
        """
        Gets the site registry.  The first time it is called, this will
        download the registry or load it from the data file packaged with
        astropy.
Subsequent calls will use the cached version unless explicitly overridden. Parameters ---------- force_download : bool or str If not False, force replacement of the cached registry with a downloaded version. If a str, that will be used as the URL to download from (if just True, the default URL will be used). force_builtin : bool If True, load from the data file bundled with astropy and set the cache to that. Returns ------- reg : astropy.coordinates.sites.SiteRegistry """ # need to do this here at the bottom to avoid circular dependencies from .sites import get_builtin_sites, get_downloaded_sites if force_builtin and force_download: raise ValueError('Cannot have both force_builtin and force_download True') if force_builtin: reg = cls._site_registry = get_builtin_sites() else: reg = getattr(cls, '_site_registry', None) if force_download or not reg: try: if isinstance(force_download, str): reg = get_downloaded_sites(force_download) else: reg = get_downloaded_sites() except OSError: if force_download: raise msg = ('Could not access the online site list. Falling ' 'back on the built-in version, which is rather ' 'limited. If you want to retry the download, do ' '{0}._get_site_registry(force_download=True)') warn(AstropyUserWarning(msg.format(cls.__name__))) reg = get_builtin_sites() cls._site_registry = reg return reg @property def ellipsoid(self): """The default ellipsoid used to convert to geodetic coordinates.""" return self._ellipsoid @ellipsoid.setter def ellipsoid(self, ellipsoid): self._ellipsoid = _check_ellipsoid(ellipsoid) @property def geodetic(self): """Convert to geodetic coordinates for the default ellipsoid.""" return self.to_geodetic() def to_geodetic(self, ellipsoid=None): """Convert to geodetic coordinates. Parameters ---------- ellipsoid : str, optional Reference ellipsoid to use. Default is the one the coordinates were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72' Returns ------- lon, lat, height : `~astropy.units.Quantity` The tuple is a ``GeodeticLocation`` namedtuple and is comprised of instances of `~astropy.coordinates.Longitude`, `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`. Raises ------ ValueError if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geodetic coordinates, the ERFA routine ``gc2gd`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid) xyz = self.view(self._array_dtype, u.Quantity) llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as( ELLIPSOIDS[ellipsoid]) return GeodeticLocation( Longitude(llh.lon, u.deg, wrap_angle=180*u.deg, copy=False), llh.lat << u.deg, llh.height << self.unit) @property def lon(self): """Longitude of the location, for the default ellipsoid.""" return self.geodetic[0] @property def lat(self): """Latitude of the location, for the default ellipsoid.""" return self.geodetic[1] @property def height(self): """Height of the location, for the default ellipsoid.""" return self.geodetic[2] # mostly for symmetry with geodetic and to_geodetic. @property def geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return self.to_geocentric() def to_geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return (self.x, self.y, self.z) def get_itrs(self, obstime=None): """ Generates an `~astropy.coordinates.ITRS` object with the location of this object at the requested ``obstime``. 
        Parameters
        ----------
        obstime : `~astropy.time.Time` or None
            The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
            if None, the default ``obstime`` will be used.

        Returns
        -------
        itrs : `~astropy.coordinates.ITRS`
            The new object in the ITRS frame
        """
        # Broadcast for a single position at multiple times, but don't attempt
        # to be more general here.
        if obstime and self.size == 1 and obstime.shape:
            self = np.broadcast_to(self, obstime.shape, subok=True)

        # do this here to prevent a series of complicated circular imports
        from .builtin_frames import ITRS
        return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)

    itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object
                                     for the location of this object at the
                                     default ``obstime``.""")

    def get_gcrs(self, obstime):
        """GCRS position with velocity at ``obstime`` as a GCRS coordinate.

        Parameters
        ----------
        obstime : `~astropy.time.Time`
            The ``obstime`` to calculate the GCRS position/velocity at.

        Returns
        -------
        gcrs : `~astropy.coordinates.GCRS` instance
            With velocity included.
        """
        # do this here to prevent a series of complicated circular imports
        from .builtin_frames import GCRS

        loc, vel = self.get_gcrs_posvel(obstime)
        loc.differentials['s'] = CartesianDifferential.from_cartesian(vel)
        return GCRS(loc, obstime=obstime)

    def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
        """Calculate GCRS position and velocity given transformation matrices.

        The reference frame z axis must point to the Celestial Intermediate
        Pole (as is the case for CIRS and TETE).

        This private method is used in intermediate_rotation_transforms,
        where some of the matrices are already available for the coordinate
        transformation.

        The method is faster by an order of magnitude than just adding a zero
        velocity to ITRS and transforming to GCRS, because it avoids
        calculating the velocity via finite differencing of the results of
        the transformation at three separate times.
        """
        # The simplest route is to transform to the reference frame where the
        # z axis is properly aligned with the Earth's rotation axis (CIRS or
        # TETE), then calculate the velocity, and then transform this
        # reference position and velocity to GCRS.  For speed, though, we
        # transform the coordinates to GCRS in one step, and calculate the
        # velocities by rotating around the earth's axis transformed to GCRS.
        ref_to_gcrs = matrix_transpose(gcrs_to_ref)
        itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
        # Earth's rotation vector in the ref frame is rot_vec_ref = (0, 0, OMEGA_EARTH),
        # so in GCRS it is ref_to_gcrs[..., 2] * OMEGA_EARTH.
        rot_vec_gcrs = CartesianRepresentation(ref_to_gcrs[..., 2] * OMEGA_EARTH,
                                               xyz_axis=-1, copy=False)
        # Get the position in the GCRS frame.
        # Since we just need the cartesian representation of ITRS, avoid get_itrs().
        itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
        pos = itrs_cart.transform(itrs_to_gcrs)
        vel = rot_vec_gcrs.cross(pos)
        return pos, vel

    def get_gcrs_posvel(self, obstime):
        """
        Calculate the GCRS position and velocity of this object at the
        requested ``obstime``.

        Parameters
        ----------
        obstime : `~astropy.time.Time`
            The ``obstime`` to calculate the GCRS position/velocity at.

        Returns
        -------
        obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
            The GCRS position of the object
        obsgeovel : `~astropy.coordinates.CartesianRepresentation`
            The GCRS velocity of the object
        """
        # Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import ( cirs_to_itrs_mat, gcrs_to_cirs_mat) # Get gcrs_posvel by transforming via CIRS (slightly faster than TETE). return self._get_gcrs_posvel(obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)) def gravitational_redshift(self, obstime, bodies=['sun', 'jupiter', 'moon'], masses={}): """Return the gravitational redshift at this EarthLocation. Calculates the gravitational redshift, of order 3 m/s, due to the requested solar system bodies. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the redshift at. bodies : iterable, optional The bodies (other than the Earth) to include in the redshift calculation. List elements should be any body name `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and the Moon. Earth is always included (because the class represents an *Earth* location). masses : dict[str, `~astropy.units.Quantity`], optional The mass or gravitational parameters (G * mass) to assume for the bodies requested in ``bodies``. Can be used to override the defaults for the Sun, Jupiter, the Moon, and the Earth, or to pass in masses for other bodies. Returns ------- redshift : `~astropy.units.Quantity` Gravitational redshift in velocity units at given obstime. """ # needs to be here to avoid circular imports from .solar_system import get_body_barycentric bodies = list(bodies) # Ensure earth is included and last in the list. if 'earth' in bodies: bodies.remove('earth') bodies.append('earth') _masses = {'sun': consts.GM_sun, 'jupiter': consts.GM_jup, 'moon': consts.G * 7.34767309e22*u.kg, 'earth': consts.GM_earth} _masses.update(masses) GMs = [] M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg)) for body in bodies: try: GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency])) except KeyError as err: raise KeyError(f'body "{body}" does not have a mass.') from err except u.UnitsError as exc: exc.args += ('"masses" argument values must be masses or ' 'gravitational parameters.',) raise positions = [get_body_barycentric(name, obstime) for name in bodies] # Calculate distances to objects other than earth. distances = [(pos - positions[-1]).norm() for pos in positions[:-1]] # Append distance from Earth's center for Earth's contribution. distances.append(CartesianRepresentation(self.geocentric).norm()) # Get redshifts due to all objects. redshifts = [-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)] # Reverse order of summing, to go from small to big, and to get # "earth" first, which gives m/s as unit. return sum(redshifts[::-1]) @property def x(self): """The X component of the geocentric coordinates.""" return self['x'] @property def y(self): """The Y component of the geocentric coordinates.""" return self['y'] @property def z(self): """The Z component of the geocentric coordinates.""" return self['z'] def __getitem__(self, item): result = super().__getitem__(item) if result.dtype is self.dtype: return result.view(self.__class__) else: return result.view(u.Quantity) def __array_finalize__(self, obj): super().__array_finalize__(obj) if hasattr(obj, '_ellipsoid'): self._ellipsoid = obj._ellipsoid def __len__(self): if self.shape == (): raise IndexError('0-d EarthLocation arrays cannot be indexed') else: return super().__len__() def _to_value(self, unit, equivalencies=[]): """Helper method for to and to_value.""" # Conversion to another unit in both ``to`` and ``to_value`` goes # via this routine. 
To make the regular quantity routines work, we
        # temporarily turn the structured array into a regular one.
        array_view = self.view(self._array_dtype, np.ndarray)
        if equivalencies == []:
            equivalencies = self._equivalencies
        new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
        return new_array.view(self.dtype).reshape(self.shape)


geodetic_base_doc = """{__doc__}

    Parameters
    ----------
    lon, lat : angle-like
        The longitude and latitude of the point(s), in angular units. The
        latitude should be between -90 and 90 degrees, and the longitude will
        be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle` and either
        `~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
        depending on the parameter.
    height : `~astropy.units.Quantity` ['length']
        The height of the point(s).
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
"""


@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
    """Base geodetic representation."""

    attr_classes = {'lon': Longitude,
                    'lat': Latitude,
                    'height': u.Quantity}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        if '_ellipsoid' in cls.__dict__:
            ELLIPSOIDS[cls._ellipsoid] = cls

    def __init__(self, lon, lat=None, height=None, copy=True):
        if height is None and not isinstance(lon, self.__class__):
            height = 0 << u.m

        super().__init__(lon, lat, height, copy=copy)
        if not self.height.unit.is_equivalent(u.m):
            raise u.UnitTypeError(f"{self.__class__.__name__} requires "
                                  "height with units of length.")

    def to_cartesian(self):
        """
        Converts geodetic coordinates on the class's reference ellipsoid to
        3D rectangular (geocentric) cartesian coordinates.
        """
        xyz = erfa.gd2gc(getattr(erfa, self._ellipsoid),
                         self.lon, self.lat, self.height)
        return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates (assumed geocentric) to
        geodetic coordinates on the class's reference ellipsoid.
        """
        lon, lat, height = erfa.gc2gd(getattr(erfa, cls._ellipsoid),
                                      cart.get_xyz(xyz_axis=-1))
        return cls(lon, lat, height, copy=False)


@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in WGS84 3D geodetic coordinates."""

    _ellipsoid = 'WGS84'


@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in WGS72 3D geodetic coordinates."""

    _ellipsoid = 'WGS72'


@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
    """Representation of points in GRS80 3D geodetic coordinates."""

    _ellipsoid = 'GRS80'
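

# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the astropy API):
# round-trip a point between geodetic and geocentric coordinates using the
# classes defined above. The coordinate values are illustrative only.
if __name__ == '__main__':
    loc = EarthLocation.from_geodetic(lon=-155.478 * u.deg,
                                      lat=19.828 * u.deg,
                                      height=4160 * u.m)
    # Internally the location is stored geocentrically, so x, y, z come
    # straight from the structured array.
    print(loc.to_geocentric())
    # The same point expressed on a different reference ellipsoid.
    print(loc.to_geodetic('GRS80'))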
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the fundamental classes used for representing coordinates in astropy. """ import warnings from collections import namedtuple import numpy as np from . import angle_formats as form from astropy import units as u from astropy.utils import isiterable __all__ = ['Angle', 'Latitude', 'Longitude'] # these are used by the `hms` and `dms` attributes hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's')) dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's')) signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's')) class Angle(u.SpecificTypeQuantity): """ One or more angular value(s) with units equivalent to radians or degrees. An angle can be specified either as an array, scalar, tuple (see below), string, `~astropy.units.Quantity` or another :class:`~astropy.coordinates.Angle`. The input parser is flexible and supports a variety of formats. The examples below illustrate common ways of initializing an `Angle` object. First some imports:: >>> from astropy.coordinates import Angle >>> from astropy import units as u The angle values can now be provided:: >>> Angle('10.2345d') <Angle 10.2345 deg> >>> Angle(['10.2345d', '-20d']) <Angle [ 10.2345, -20. ] deg> >>> Angle('1:2:30.43 degrees') <Angle 1.04178611 deg> >>> Angle('1 2 0 hours') <Angle 1.03333333 hourangle> >>> Angle(np.arange(1, 8), unit=u.deg) <Angle [1., 2., 3., 4., 5., 6., 7.] deg> >>> Angle('1°2′3″') <Angle 1.03416667 deg> >>> Angle('1°2′3″N') <Angle 1.03416667 deg> >>> Angle('1d2m3.4s') <Angle 1.03427778 deg> >>> Angle('1d2m3.4sS') <Angle -1.03427778 deg> >>> Angle('-1h2m3s') <Angle -1.03416667 hourangle> >>> Angle('-1h2m3sE') <Angle -1.03416667 hourangle> >>> Angle('-1h2.5m') <Angle -1.04166667 hourangle> >>> Angle('-1h2.5mW') <Angle 1.04166667 hourangle> >>> Angle('-1:2.5', unit=u.deg) <Angle -1.04166667 deg> >>> Angle((10, 11, 12), unit='hourangle') # (h, m, s) <Angle 10.18666667 hourangle> >>> Angle((-1, 2, 3), unit=u.deg) # (d, m, s) <Angle -1.03416667 deg> >>> Angle(10.2345 * u.deg) <Angle 10.2345 deg> >>> Angle(Angle(10.2345 * u.deg)) <Angle 10.2345 deg> Parameters ---------- angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle` The angle value. If a tuple, will be interpreted as ``(h, m, s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted following the rules described above. If ``angle`` is a sequence or array of strings, the resulting values will be in the given ``unit``, or if `None` is provided, the unit will be taken from the first given value. unit : unit-like, optional The unit of the value specified for the angle. This may be any string that `~astropy.units.Unit` understands, but it is better to give an actual unit object. Must be an angular unit. dtype : `~numpy.dtype`, optional See `~astropy.units.Quantity`. copy : bool, optional See `~astropy.units.Quantity`. Raises ------ `~astropy.units.UnitsError` If a unit is not provided or it is not an angular unit. 
""" _equivalent_unit = u.radian _include_easy_conversion_members = True def __new__(cls, angle, unit=None, dtype=None, copy=True, **kwargs): if not isinstance(angle, u.Quantity): if unit is not None: unit = cls._convert_unit_to_angle_unit(u.Unit(unit)) if isinstance(angle, tuple): angle = cls._tuple_to_float(angle, unit) elif isinstance(angle, str): angle, angle_unit = form.parse_angle(angle, unit) if angle_unit is None: angle_unit = unit if isinstance(angle, tuple): angle = cls._tuple_to_float(angle, angle_unit) if angle_unit is not unit: # Possible conversion to `unit` will be done below. angle = u.Quantity(angle, angle_unit, copy=False) elif (isiterable(angle) and not (isinstance(angle, np.ndarray) and angle.dtype.kind not in 'SUVO')): angle = [Angle(x, unit, copy=False) for x in angle] return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs) @staticmethod def _tuple_to_float(angle, unit): """ Converts an angle represented as a 3-tuple or 2-tuple into a floating point number in the given unit. """ # TODO: Numpy array of tuples? if unit == u.hourangle: return form.hms_to_hours(*angle) elif unit == u.degree: return form.dms_to_degrees(*angle) else: raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'") @staticmethod def _convert_unit_to_angle_unit(unit): return u.hourangle if unit is u.hour else unit def _set_unit(self, unit): super()._set_unit(self._convert_unit_to_angle_unit(unit)) @property def hour(self): """ The angle's value in hours (read-only property). """ return self.hourangle @property def hms(self): """ The angle's value in hours, as a named tuple with ``(h, m, s)`` members. (This is a read-only property.) """ return hms_tuple(*form.hours_to_hms(self.hourangle)) @property def dms(self): """ The angle's value in degrees, as a named tuple with ``(d, m, s)`` members. (This is a read-only property.) """ return dms_tuple(*form.degrees_to_dms(self.degree)) @property def signed_dms(self): """ The angle's value in degrees, as a named tuple with ``(sign, d, m, s)`` members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of the angle is given by ``sign``. (This is a read-only property.) This is primarily intended for use with `dms` to generate string representations of coordinates that are correct for negative angles. """ return signed_dms_tuple(np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))) def to_string(self, unit=None, decimal=False, sep='fromunit', precision=None, alwayssign=False, pad=False, fields=3, format=None): """ A string representation of the angle. Parameters ---------- unit : `~astropy.units.UnitBase`, optional Specifies the unit. Must be an angular unit. If not provided, the unit used to initialize the angle will be used. decimal : bool, optional If `True`, a decimal representation will be used, otherwise the returned string will be in sexagesimal form. sep : str, optional The separator between numbers in a sexagesimal representation. E.g., if it is ':', the result is ``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g., ``sep='hms'`` would give the result ``'12h41m11.1241s'``, or sep='-:' would yield ``'11-21:17.124'``. Alternatively, the special string 'fromunit' means 'dms' if the unit is degrees, or 'hms' if the unit is hours. precision : int, optional The level of decimal precision. If ``decimal`` is `True`, this is the raw precision, otherwise it gives the precision of the last place of the sexagesimal representation (seconds). 
If `None`, or not provided, the
            number of decimal places is determined by the value, and
            will be between 0-8 decimal places as required.
        alwayssign : bool, optional
            If `True`, include the sign no matter what.  If `False`,
            only include the sign if it is negative.
        pad : bool, optional
            If `True`, include leading zeros when needed to ensure a
            fixed number of characters for sexagesimal representation.
        fields : int, optional
            Specifies the number of fields to display when outputting
            sexagesimal notation.  For example:

            - fields == 1: ``'5d'``
            - fields == 2: ``'5d45m'``
            - fields == 3: ``'5d45m32.5s'``

            By default, all fields are displayed.
        format : str, optional
            The format of the result.  If not provided, an unadorned
            string is returned.  Supported values are:

            - 'latex': Return a LaTeX-formatted string
            - 'unicode': Return a string containing non-ASCII unicode
              characters, such as the degree symbol

        Returns
        -------
        strrepr : str or array
            A string representation of the angle. If the angle is an array,
            this will be an array with a unicode dtype.
        """
        if unit is None:
            unit = self.unit
        else:
            unit = self._convert_unit_to_angle_unit(u.Unit(unit))

        separators = {
            None: {
                u.degree: 'dms',
                u.hourangle: 'hms'},
            'latex': {
                u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
                u.hourangle: [r'^{\mathrm{h}}', r'^{\mathrm{m}}', r'^{\mathrm{s}}']},
            'unicode': {
                u.degree: '°′″',
                u.hourangle: 'ʰᵐˢ'}
        }

        if sep == 'fromunit':
            if format not in separators:
                raise ValueError(f"Unknown format '{format}'")
            seps = separators[format]
            if unit in seps:
                sep = seps[unit]

        # Create an iterator so we can format each element of what
        # might be an array.
        if unit is u.degree:
            if decimal:
                values = self.degree
                if precision is not None:
                    func = ("{0:0." + str(precision) + "f}").format
                else:
                    func = '{:g}'.format
            else:
                if sep == 'fromunit':
                    sep = 'dms'
                values = self.degree
                func = lambda x: form.degrees_to_string(
                    x, precision=precision, sep=sep, pad=pad, fields=fields)

        elif unit is u.hourangle:
            if decimal:
                values = self.hour
                if precision is not None:
                    func = ("{0:0." + str(precision) + "f}").format
                else:
                    func = '{:g}'.format
            else:
                if sep == 'fromunit':
                    sep = 'hms'
                values = self.hour
                func = lambda x: form.hours_to_string(
                    x, precision=precision, sep=sep, pad=pad, fields=fields)

        elif unit.is_equivalent(u.radian):
            if decimal:
                values = self.to_value(unit)
                if precision is not None:
                    func = ("{0:1." + str(precision) + "f}").format
                else:
                    func = "{:g}".format
            elif sep == 'fromunit':
                values = self.to_value(unit)
                unit_string = unit.to_string(format=format)
                if format == 'latex':
                    unit_string = unit_string[1:-1]

                if precision is not None:
                    def plain_unit_format(val):
                        return ("{0:0." + str(precision) + "f}{1}").format(
                            val, unit_string)
                    func = plain_unit_format
                else:
                    def plain_unit_format(val):
                        return f"{val:g}{unit_string}"
                    func = plain_unit_format
            else:
                raise ValueError(
                    f"'{unit.name}' can not be represented in sexagesimal notation")

        else:
            raise u.UnitsError(
                "The unit value provided is not an angular unit.")

        def do_format(val):
            # Check if value is not nan to avoid ValueErrors when turning it
            # into a sexagesimal string.
            if not np.isnan(val):
                s = func(float(val))
                if alwayssign and not s.startswith('-'):
                    s = '+' + s
                if format == 'latex':
                    s = f'${s}$'
                return s
            s = f"{val}"
            return s

        format_ufunc = np.vectorize(do_format, otypes=['U'])
        result = format_ufunc(values)

        if result.ndim == 0:
            result = result[()]
        return result

    def _wrap_at(self, wrap_angle):
        """
        Implementation that assumes ``angle`` is already validated
        and that wrapping is inplace.
""" # Convert the wrap angle and 360 degrees to the native unit of # this Angle, then do all the math on raw Numpy arrays rather # than Quantity objects for speed. a360 = u.degree.to(self.unit, 360.0) wrap_angle = wrap_angle.to_value(self.unit) wrap_angle_floor = wrap_angle - a360 self_angle = self.view(np.ndarray) # Do the wrapping, but only if any angles need to be wrapped # # This invalid catch block is needed both for the floor division # and for the comparisons later on (latter not really needed # any more for >= 1.19 (NUMPY_LT_1_19), but former is). with np.errstate(invalid='ignore'): wraps = (self_angle - wrap_angle_floor) // a360 np.nan_to_num(wraps, copy=False) if np.any(wraps != 0): self_angle -= wraps*a360 # Rounding errors can cause problems. self_angle[self_angle >= wrap_angle] -= a360 self_angle[self_angle < wrap_angle_floor] += a360 def wrap_at(self, wrap_angle, inplace=False): """ Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``. This method forces all the angle values to be within a contiguous 360 degree range so that ``wrap_angle - 360d <= angle < wrap_angle``. By default a new Angle object is returned, but if the ``inplace`` argument is `True` then the `~astropy.coordinates.Angle` object is wrapped in place and nothing is returned. For instance:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20.0, 150.0, 350.0] * u.deg) >>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP array([340., 150., 350.]) >>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP >>> a.degree # doctest: +FLOAT_CMP array([-20., 150., -10.]) Parameters ---------- wrap_angle : angle-like Specifies a single value for the wrap angle. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. inplace : bool If `True` then wrap the object in place instead of returning a new `~astropy.coordinates.Angle` Returns ------- out : Angle or None If ``inplace is False`` (default), return new `~astropy.coordinates.Angle` object with angles wrapped accordingly. Otherwise wrap in place and return `None`. """ wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle if not inplace: self = self.copy() self._wrap_at(wrap_angle) return None if inplace else self def is_within_bounds(self, lower=None, upper=None): """ Check if all angle(s) satisfy ``lower <= angle < upper`` If ``lower`` is not specified (or `None`) then no lower bounds check is performed. Likewise ``upper`` can be left unspecified. For example:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20, 150, 350] * u.deg) >>> a.is_within_bounds('0d', '360d') False >>> a.is_within_bounds(None, '360d') True >>> a.is_within_bounds(-30 * u.deg, None) True Parameters ---------- lower : angle-like or None Specifies lower bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. upper : angle-like or None Specifies upper bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. 
        Returns
        -------
        is_within_bounds : bool
            `True` if all angles satisfy ``lower <= angle < upper``
        """
        ok = True
        if lower is not None:
            ok &= np.all(Angle(lower) <= self)
        if ok and upper is not None:
            ok &= np.all(self < Angle(upper))
        return bool(ok)

    def _str_helper(self, format=None):
        if self.isscalar:
            return self.to_string(format=format)

        def formatter(x):
            return x.to_string(format=format)

        return np.array2string(self, formatter={'all': formatter})

    def __str__(self):
        return self._str_helper()

    def _repr_latex_(self):
        return self._str_helper(format='latex')


def _no_angle_subclass(obj):
    """Return any Angle subclass objects as Angle objects.

    This is used to ensure that Latitude and Longitude change to Angle
    objects when they are used in calculations (such as lon/2.)
    """
    if isinstance(obj, tuple):
        return tuple(_no_angle_subclass(_obj) for _obj in obj)

    return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj


class Latitude(Angle):
    """
    Latitude-like angle(s) which must be in the range -90 to +90 deg.

    A Latitude object is distinguished from a pure
    :class:`~astropy.coordinates.Angle` by virtue of being constrained
    so that::

        -90.0 * u.deg <= angle(s) <= +90.0 * u.deg

    Any attempt to set a value outside that range will result in a
    `ValueError`.

    The input angle(s) can be specified either as an array, list,
    scalar, tuple (see below), string,
    :class:`~astropy.units.Quantity` or another
    :class:`~astropy.coordinates.Angle`.

    The input parser is flexible and supports all of the input formats
    supported by :class:`~astropy.coordinates.Angle`.

    Parameters
    ----------
    angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.

        If ``angle`` is a sequence or array of strings, the resulting
        values will be in the given ``unit``, or if `None` is provided,
        the unit will be taken from the first given value.

    unit : unit-like, optional
        The unit of the value specified for the angle.  This may be
        any string that `~astropy.units.Unit` understands, but it is
        better to give an actual unit object.  Must be an angular
        unit.

    Raises
    ------
    `~astropy.units.UnitsError`
        If a unit is not provided or it is not an angular unit.
    `TypeError`
        If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
    """
    def __new__(cls, angle, unit=None, **kwargs):
        # Forbid creating a Lat from a Long.
        if isinstance(angle, Longitude):
            raise TypeError("A Latitude angle cannot be created from a Longitude angle")
        self = super().__new__(cls, angle, unit=unit, **kwargs)
        self._validate_angles()
        return self

    def _validate_angles(self, angles=None):
        """Check that angles are between -90 and 90 degrees.
        If not given, the check is done on the object itself"""
        # Convert the lower and upper bounds to the "native" unit of
        # this angle.  This limits multiplication to two values,
        # rather than the N values in `self.value`.  Also, the
        # comparison is performed on raw arrays, rather than Quantity
        # objects, for speed.
        if angles is None:
            angles = self
        lower = u.degree.to(angles.unit, -90.0)
        upper = u.degree.to(angles.unit, 90.0)
        # This invalid catch block can be removed when the minimum numpy
        # version is >= 1.19 (NUMPY_LT_1_19)
        with np.errstate(invalid='ignore'):
            invalid_angles = (np.any(angles.value < lower) or
                              np.any(angles.value > upper))
        if invalid_angles:
            raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
                             'got {}'.format(angles.to(u.degree)))

    def __setitem__(self, item, value):
        # Forbid assigning a Long to a Lat.
        if isinstance(value, Longitude):
            raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
        # first check bounds
        if value is not np.ma.masked:
            self._validate_angles(value)
        super().__setitem__(item, value)

    # Any calculation should drop to Angle
    def __array_ufunc__(self, *args, **kwargs):
        results = super().__array_ufunc__(*args, **kwargs)
        return _no_angle_subclass(results)


class LongitudeInfo(u.QuantityInfo):
    _represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)


class Longitude(Angle):
    """
    Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.

    A ``Longitude`` object is distinguished from a pure
    :class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
    property.  The ``wrap_angle`` specifies that all angle values
    represented by the object will be in the range::

        wrap_angle - 360 * u.deg <= angle(s) < wrap_angle

    The default ``wrap_angle`` is 360 deg.  Setting ``wrap_angle=180 *
    u.deg`` would instead result in values between -180 and +180 deg.
    Setting the ``wrap_angle`` attribute of an existing ``Longitude``
    object will result in re-wrapping the angle values in-place.

    The input angle(s) can be specified either as an array, list,
    scalar, tuple, string, :class:`~astropy.units.Quantity`
    or another :class:`~astropy.coordinates.Angle`.

    The input parser is flexible and supports all of the input formats
    supported by :class:`~astropy.coordinates.Angle`.

    Parameters
    ----------
    angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.

        If ``angle`` is a sequence or array of strings, the resulting
        values will be in the given ``unit``, or if `None` is provided,
        the unit will be taken from the first given value.

    unit : unit-like ['angle'], optional
        The unit of the value specified for the angle.  This may be
        any string that `~astropy.units.Unit` understands, but it is
        better to give an actual unit object.  Must be an angular
        unit.

    wrap_angle : angle-like or None, optional
        Angle at which to wrap back to ``wrap_angle - 360 deg``.
        If ``None`` (default), it will be taken to be 360 deg unless ``angle``
        has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
        in which case it will be taken from there.

    Raises
    ------
    `~astropy.units.UnitsError`
        If a unit is not provided or it is not an angular unit.
    `TypeError`
        If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
    """

    _wrap_angle = None
    _default_wrap_angle = Angle(360 * u.deg)
    info = LongitudeInfo()

    def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
        # Forbid creating a Long from a Lat.
if isinstance(angle, Latitude): raise TypeError("A Longitude angle cannot be created from " "a Latitude angle.") self = super().__new__(cls, angle, unit=unit, **kwargs) if wrap_angle is None: wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle) self.wrap_angle = wrap_angle # angle-like b/c property setter return self def __setitem__(self, item, value): # Forbid assigning a Lat to a Long. if isinstance(value, Latitude): raise TypeError("A Latitude angle cannot be assigned to a Longitude angle") super().__setitem__(item, value) self._wrap_at(self.wrap_angle) @property def wrap_angle(self): return self._wrap_angle @wrap_angle.setter def wrap_angle(self, value): self._wrap_angle = Angle(value, copy=False) self._wrap_at(self.wrap_angle) def __array_finalize__(self, obj): super().__array_finalize__(obj) self._wrap_angle = getattr(obj, '_wrap_angle', self._default_wrap_angle) # Any calculation should drop to Angle def __array_ufunc__(self, *args, **kwargs): results = super().__array_ufunc__(*args, **kwargs) return _no_angle_subclass(results)
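

# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the astropy API):
# wrapping and bounds behaviour of the classes defined above. Values are
# illustrative only.
if __name__ == '__main__':
    a = Angle([-20.0, 150.0, 350.0] * u.deg)
    print(a.wrap_at(180 * u.deg).degree)     # [-20., 150., -10.]
    lon = Longitude(a)                       # default wrap_angle of 360 deg
    print(lon.degree)                        # [340., 150., 350.]
    lat = Latitude('19d49m42s')
    print(lat.degree)                        # ~19.828; values >90 deg raise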
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.cosmology import units as cu from astropy.io import registry as io_registry from astropy.units import add_enabled_units __all__ = ["CosmologyRead", "CosmologyWrite", "CosmologyFromFormat", "CosmologyToFormat"] __doctest_skip__ = __all__ # ============================================================================== # Read / Write readwrite_registry = io_registry.UnifiedIORegistry() class CosmologyRead(io_registry.UnifiedReadWrite): """Read and parse data to a `~astropy.cosmology.Cosmology`. This function provides the Cosmology interface to the Astropy unified I/O layer. This allows easily reading a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Cosmology >>> cosmo1 = Cosmology.read('<file name>') When the ``read`` method is called from a subclass the subclass will provide a keyword argument ``cosmology=<class>`` to the registered read method. The method uses this cosmology class, regardless of the class indicated in the file, and sets parameters' default values from the class' signature. Get help on the available readers using the ``help()`` method:: >>> Cosmology.read.help() # Get help reading and list supported formats >>> Cosmology.read.help(format='<format>') # Get detailed help on a format >>> Cosmology.read.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- *args Positional arguments passed through to data reader. If supplied the first argument is typically the input filename. format : str (optional, keyword-only) File format specifier. **kwargs Keyword arguments passed through to data reader. Returns ------- out : `~astropy.cosmology.Cosmology` subclass instance `~astropy.cosmology.Cosmology` corresponding to file contents. Notes ----- """ def __init__(self, instance, cosmo_cls): super().__init__(instance, cosmo_cls, "read", registry=readwrite_registry) def __call__(self, *args, **kwargs): from astropy.cosmology.core import Cosmology # so subclasses can override, also pass the class as a kwarg. # allows for `FlatLambdaCDM.read` and # `Cosmology.read(..., cosmology=FlatLambdaCDM)` if self._cls is not Cosmology: kwargs.setdefault("cosmology", self._cls) # set, if not present # check that it is the correct cosmology, can be wrong if user # passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)` valid = (self._cls, self._cls.__qualname__) if kwargs["cosmology"] not in valid: raise ValueError( "keyword argument `cosmology` must be either the class " f"{valid[0]} or its qualified name '{valid[1]}'") with add_enabled_units(cu): cosmo = self.registry.read(self._cls, *args, **kwargs) return cosmo class CosmologyWrite(io_registry.UnifiedReadWrite): """Write this Cosmology object out in the specified format. This function provides the Cosmology interface to the astropy unified I/O layer. This allows easily writing a file in supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.write('<file name>') Get help on the available writers for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.write.help() # Get help writing and list supported formats >>> Cosmology.write.help(format='<format>') # Get detailed help on format >>> Cosmology.write.list_formats() # Print list of available formats Parameters ---------- *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. 
    format : str (optional, keyword-only)
        File format specifier.
    **kwargs
        Keyword arguments passed through to data writer.

    Notes
    -----
    """

    def __init__(self, instance, cls):
        super().__init__(instance, cls, "write", registry=readwrite_registry)

    def __call__(self, *args, **kwargs):
        self.registry.write(self._instance, *args, **kwargs)


# ==============================================================================
# Format Interchange
# for transforming instances, e.g. Cosmology <-> dict

convert_registry = io_registry.UnifiedIORegistry()


class CosmologyFromFormat(io_registry.UnifiedReadWrite):
    """Transform object to a `~astropy.cosmology.Cosmology`.

    This function provides the Cosmology interface to the Astropy unified I/O
    layer. This allows easily parsing supported data formats using
    syntax such as::

      >>> from astropy.cosmology import Cosmology
      >>> cosmo1 = Cosmology.from_format(cosmo_mapping, format='mapping')

    When the ``from_format`` method is called from a subclass the subclass will
    provide a keyword argument ``cosmology=<class>`` to the registered parser.
    The method uses this cosmology class, regardless of the class indicated in
    the data, and sets parameters' default values from the class' signature.

    Get help on the available readers using the ``help()`` method::

      >>> Cosmology.from_format.help()  # Get help and list supported formats
      >>> Cosmology.from_format.help('<format>')  # Get detailed help on a format
      >>> Cosmology.from_format.list_formats()  # Print list of available formats

    See also: https://docs.astropy.org/en/stable/io/unified.html

    Parameters
    ----------
    obj : object
        The object to parse according to 'format'
    *args
        Positional arguments passed through to data parser.
    format : str or None, optional keyword-only
        Object format specifier. For `None` (default) CosmologyFromFormat tries
        to identify the correct format.
    **kwargs
        Keyword arguments passed through to data parser.
        Parsers should accept the following keyword arguments:

        - cosmology : the class (or string name thereof) to use / check when
                      constructing the cosmology instance.

    Returns
    -------
    out : `~astropy.cosmology.Cosmology` subclass instance
        `~astropy.cosmology.Cosmology` corresponding to ``obj`` contents.

    """

    def __init__(self, instance, cosmo_cls):
        super().__init__(instance, cosmo_cls, "read", registry=convert_registry)

    def __call__(self, obj, *args, format=None, **kwargs):
        from astropy.cosmology.core import Cosmology

        # so subclasses can override, also pass the class as a kwarg.
        # allows for `FlatLambdaCDM.from_format` and
        # `Cosmology.from_format(..., cosmology=FlatLambdaCDM)`
        if self._cls is not Cosmology:
            kwargs.setdefault("cosmology", self._cls)  # set, if not present
            # check that it is the correct cosmology, can be wrong if user
            # passes in e.g. `w0wzCDM.from_format(..., cosmology=FlatLambdaCDM)`
            valid = (self._cls, self._cls.__qualname__)
            if kwargs["cosmology"] not in valid:
                raise ValueError(
                    "keyword argument `cosmology` must be either the class "
                    f"{valid[0]} or its qualified name '{valid[1]}'")

        with add_enabled_units(cu):
            cosmo = self.registry.read(self._cls, obj, *args, format=format,
                                       **kwargs)

        return cosmo


class CosmologyToFormat(io_registry.UnifiedReadWrite):
    """Transform this Cosmology to another format.

    This function provides the Cosmology interface to the astropy unified I/O
    layer.
This allows easily transforming to supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("mapping") {'cosmology': astropy.cosmology.core.FlatLambdaCDM, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, ... Get help on the available representations for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.to_format.help() # Get help and list supported formats >>> Cosmology.to_format.help('<format>') # Get detailed help on format >>> Cosmology.to_format.list_formats() # Print list of available formats Parameters ---------- format : str Format specifier. *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. **kwargs Keyword arguments passed through to data writer. """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=convert_registry) def __call__(self, format, *args, **kwargs): return self.registry.write(self._instance, None, *args, format=format, **kwargs)
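

# ==============================================================================
# Minimal usage sketch (editorial addition, not part of the astropy API):
# a round trip through the "mapping" interchange format registered with
# ``convert_registry`` by astropy.cosmology.io.
if __name__ == '__main__':
    from astropy.cosmology import Cosmology, Planck18

    mapping = Planck18.to_format("mapping")
    cosmo = Cosmology.from_format(mapping, format="mapping")
    print(cosmo == Planck18)   # True: same class, parameters, and name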
# Licensed under a 3-clause BSD style license - see LICENSE.rst import abc import inspect import numpy as np from astropy.io.registry import UnifiedReadWriteMethod from astropy.utils.decorators import classproperty from astropy.utils.metadata import MetaData from .connect import CosmologyFromFormat, CosmologyRead, CosmologyToFormat, CosmologyWrite from .parameter import Parameter # Originally authored by Andrew Becker ([email protected]), # and modified by Neil Crighton ([email protected]), Roban Kramer # ([email protected]), and Nathaniel Starkman ([email protected]). # Many of these adapted from Hogg 1999, astro-ph/9905116 # and Linder 2003, PRL 90, 91301 __all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"] __doctest_requires__ = {} # needed until __getattr__ removed # registry of cosmology classes with {key=name : value=class} _COSMOLOGY_CLASSES = dict() class CosmologyError(Exception): pass class Cosmology(metaclass=abc.ABCMeta): """Base-class for all Cosmologies. Parameters ---------- *args Arguments into the cosmology; used by subclasses, not this base class. name : str or None (optional, keyword-only) The name of the cosmology. meta : dict or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. **kwargs Arguments into the cosmology; used by subclasses, not this base class. Notes ----- Class instances are static -- you cannot (and should not) change the values of the parameters. That is, all of the above attributes (except meta) are read only. For details on how to create performant custom subclasses, see the documentation on :ref:`astropy-cosmology-fast-integrals`. """ meta = MetaData() # Unified I/O object interchange methods from_format = UnifiedReadWriteMethod(CosmologyFromFormat) to_format = UnifiedReadWriteMethod(CosmologyToFormat) # Unified I/O read and write methods read = UnifiedReadWriteMethod(CosmologyRead) write = UnifiedReadWriteMethod(CosmologyWrite) # Parameters __parameters__ = () __all_parameters__ = () # --------------------------------------------------------------- def __init_subclass__(cls): super().__init_subclass__() # ------------------- # Parameters # Get parameters that are still Parameters, either in this class or above. 
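        # Editorial note: parameters flagged as ``derived`` are tracked
        # separately below -- they end up in ``__all_parameters__`` but not in
        # ``__parameters__``, and are not matched against the ``__init__``
        # signature.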
        parameters = []
        derived_parameters = []
        for n in cls.__parameters__:
            p = getattr(cls, n)
            if isinstance(p, Parameter):
                derived_parameters.append(n) if p.derived else parameters.append(n)

        # Add new parameter definitions
        for n, v in cls.__dict__.items():
            if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
                continue
            derived_parameters.append(n) if v.derived else parameters.append(n)

        # reorder to match signature
        ordered = [parameters.pop(parameters.index(n))
                   for n in cls._init_signature.parameters.keys()
                   if n in parameters]
        parameters = ordered + parameters  # place "unordered" at the end
        cls.__parameters__ = tuple(parameters)
        cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)

        # -------------------
        # register as a Cosmology subclass
        _COSMOLOGY_CLASSES[cls.__qualname__] = cls

    @classproperty(lazy=True)
    def _init_signature(cls):
        """Initialization signature (without 'self')."""
        # get signature, dropping "self" by taking arguments [1:]
        sig = inspect.signature(cls.__init__)
        sig = sig.replace(parameters=list(sig.parameters.values())[1:])
        return sig

    # ---------------------------------------------------------------

    def __init__(self, name=None, meta=None):
        self._name = str(name) if name is not None else name
        self.meta.update(meta or {})

    @property
    def name(self):
        """The name of the Cosmology instance."""
        return self._name

    @property
    @abc.abstractmethod
    def is_flat(self):
        """
        Return bool; `True` if the cosmology is flat.
        This is abstract and must be defined in subclasses.
        """
        raise NotImplementedError("is_flat is not implemented")

    def clone(self, *, meta=None, **kwargs):
        """Returns a copy of this object with updated parameters, as specified.

        This cannot be used to change the type of the cosmology, so ``clone()``
        cannot be used to change between flat and non-flat cosmologies.

        Parameters
        ----------
        meta : mapping or None (optional, keyword-only)
            Metadata that will update the current metadata.
        **kwargs
            Cosmology parameter (and name) modifications. If any parameter is
            changed and a new name is not given, the name will be set to "[old
            name] (modified)".

        Returns
        -------
        newcosmo : `~astropy.cosmology.Cosmology` subclass instance
            A new instance of this class with updated parameters as specified.
            If no modifications are requested, then a reference to this object
            is returned instead of a copy.

        Examples
        --------
        To make a copy of the ``Planck13`` cosmology with a different matter
        density (``Om0``), and a new name:

            >>> from astropy.cosmology import Planck13
            >>> newcosmo = Planck13.clone(name="Modified Planck 2013", Om0=0.35)

        If no name is specified, the new name will note the modification.

            >>> Planck13.clone(Om0=0.35).name
            'Planck13 (modified)'

        """
        # Quick return check, taking advantage of the Cosmology immutability.
        if meta is None and not kwargs:
            return self

        # There are changed parameter or metadata values.
        # The name needs to be changed accordingly, if it wasn't already.
        kwargs.setdefault("name", (self.name + " (modified)"
                                   if self.name is not None else None))

        # mix new meta into existing, preferring the former.
        new_meta = {**self.meta, **(meta or {})}
        # Mix kwargs into initial arguments, preferring the former.
        new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
        # Create BoundArgument to handle args versus kwargs.
        # This also handles all errors from mismatched arguments
        ba = self._init_signature.bind_partial(**new_init)

        # Return new instance, respecting args vs kwargs
        return self.__class__(*ba.args, **ba.kwargs)

    @property
    def _init_arguments(self):
        # parameters
        kw = {n: getattr(self, n) for n in self.__parameters__}

        # other info
        kw["name"] = self.name
        kw["meta"] = self.meta

        return kw

    # ---------------------------------------------------------------
    # comparison methods

    def is_equivalent(self, other, *, format=False):
        r"""Check equivalence between Cosmologies.

        Two cosmologies may be equivalent even if not the same class.
        For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
        and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.

        Parameters
        ----------
        other : `~astropy.cosmology.Cosmology` subclass instance
            The object to compare to.
        format : bool or None or str, optional keyword-only
            Whether to allow, before equivalence is checked, the object to be
            converted to a |Cosmology|. This allows, e.g. a |Table| to be
            equivalent to a Cosmology.
            `False` (default) will not allow conversion. `True` or `None` will,
            and will use the auto-identification to try to infer the correct
            format. A `str` is assumed to be the correct format to use when
            converting.

        Returns
        -------
        bool
            True if cosmologies are equivalent, False otherwise.

        Examples
        --------
        Two cosmologies may be equivalent even if not of the same class.
        In this example, the ``LambdaCDM`` has ``Ode0`` set to the same value
        calculated in ``FlatLambdaCDM``.

        >>> import astropy.units as u
        >>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
        >>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
        >>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
        >>> cosmo1.is_equivalent(cosmo2)
        True

        While in this example, the cosmologies are not equivalent.

        >>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
        >>> cosmo3.is_equivalent(cosmo2)
        False

        Also, using the keyword argument, the notion of equivalence is extended
        to any Python object that can be converted to a |Cosmology|.

        >>> from astropy.cosmology import Planck18
        >>> tbl = Planck18.to_format("astropy.table")
        >>> Planck18.is_equivalent(tbl, format=True)
        True

        The list of valid formats, e.g. the |Table| in this example, may be
        checked with ``Cosmology.from_format.list_formats()``.

        As can be seen in the list of formats, not all formats can be
        auto-identified by ``Cosmology.from_format.registry``. Objects of
        these kinds can still be checked for equivalence, but the correct
        format string must be used.

        >>> tbl = Planck18.to_format("yaml")
        >>> Planck18.is_equivalent(tbl, format="yaml")
        True
        """
        # Allow for different formats to be considered equivalent.
        if format is not False:
            format = None if format is True else format  # str->str, None/True->None
            try:
                other = Cosmology.from_format(other, format=format)
            except Exception:  # TODO! should enforce only TypeError
                return False

        # The options are: 1) same class & parameters; 2) same class, different
        # parameters; 3) different classes, equivalent parameters; 4) different
        # classes, different parameters. (1) & (3) => True, (2) & (4) => False.
        equiv = self.__equiv__(other)
        if equiv is NotImplemented and hasattr(other, "__equiv__"):
            equiv = other.__equiv__(self)  # that failed, try from 'other'

        return equiv if equiv is not NotImplemented else False

    def __equiv__(self, other):
        """Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance The object in which to compare. Returns ------- bool or `NotImplemented` `NotImplemented` if 'other' is from a different class. `True` if 'other' is of the same class and has matching parameters and parameter values. `False` otherwise. """ if other.__class__ is not self.__class__: return NotImplemented # allows other.__equiv__ # check all parameters in 'other' match those in 'self' and 'other' has # no extra parameters (latter part should never happen b/c same class) params_eq = (set(self.__all_parameters__) == set(other.__all_parameters__) and all(np.all(getattr(self, k) == getattr(other, k)) for k in self.__all_parameters__)) return params_eq def __eq__(self, other): """Check equality between Cosmologies. Checks the Parameters and immutable fields (i.e. not "meta"). Parameters ---------- other : `~astropy.cosmology.Cosmology` subclass instance The object in which to compare. Returns ------- bool `True` if Parameters and names are the same, `False` otherwise. """ if other.__class__ is not self.__class__: return NotImplemented # allows other.__eq__ # check all parameters in 'other' match those in 'self' equivalent = self.__equiv__(other) # non-Parameter checks: name name_eq = (self.name == other.name) return equivalent and name_eq # --------------------------------------------------------------- def __repr__(self): ps = {k: getattr(self, k) for k in self.__parameters__} # values cps = {k: getattr(self.__class__, k) for k in self.__parameters__} # Parameter objects namelead = f"{self.__class__.__qualname__}(" if self.name is not None: namelead += f"name=\"{self.name}\", " # nicely formatted parameters fmtps = (k + '=' + format(v, cps[k].format_spec if v is not None else '') for k, v in ps.items()) return namelead + ", ".join(fmtps) + ")" def __astropy_table__(self, cls, copy, **kwargs): """Return a `~astropy.table.Table` of type ``cls``. Parameters ---------- cls : type Astropy ``Table`` class or subclass. copy : bool Ignored. **kwargs : dict, optional Additional keyword arguments. Passed to ``self.to_format()``. See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs. Returns ------- `astropy.table.Table` or subclass instance Instance of type ``cls``. """ return self.to_format("astropy.table", cls=cls, **kwargs) class FlatCosmologyMixin(metaclass=abc.ABCMeta): """ Mixin class for flat cosmologies. Do NOT instantiate directly. Note that all instances of ``FlatCosmologyMixin`` are flat, but not all flat cosmologies are instances of ``FlatCosmologyMixin``. As example, ``LambdaCDM`` **may** be flat (for the a specific set of parameter values), but ``FlatLambdaCDM`` **will** be flat. """ @property def is_flat(self): """Return `True`, the cosmology is flat.""" return True # ----------------------------------------------------------------------------- def __getattr__(attr): from . import flrw if hasattr(flrw, attr) and attr not in ("__path__", ): import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and " f"should be imported as ``from astropy.cosmology import {attr}``." " In future this will raise an exception.", AstropyDeprecationWarning ) return getattr(flrw, attr) raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
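# ---------------------------------------------------------------------------
# A short usage sketch of the comparison machinery above (illustrative only;
# assumes the built-in ``Planck18`` realization is importable):
#
#     >>> from astropy.cosmology import Planck18
#     >>> renamed = Planck18.clone(name="my cosmology")
#     >>> renamed == Planck18               # __eq__ also compares the name
#     False
#     >>> renamed.is_equivalent(Planck18)   # the Parameters still match
#     True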
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ astropy.cosmology contains classes and functions for cosmological distance measures and other cosmology-related calculations. See the `Astropy documentation <https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more detailed usage examples and references. """ from . import core, flrw, funcs, parameter, units, utils # noqa F401 from . import io # needed before 'realizations' # isort: split from . import realizations from .core import * # noqa F401, F403 from .flrw import * # noqa F401, F403 from .funcs import * # noqa F401, F403 from .parameter import * # noqa F401, F403 from .realizations import available, default_cosmology # noqa F401, F403 from .utils import * # noqa F401, F403 __all__ = (core.__all__ + flrw.__all__ # cosmology classes + realizations.__all__ # instances thereof + ["units"] + funcs.__all__ + parameter.__all__ + utils.__all__) # utils def __getattr__(name): """Get realizations using lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") return getattr(realizations, name) def __dir__(): """Directory, including lazily-imported objects.""" return __all__
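# A quick illustration of the PEP 562 hook above (illustrative only): the
# built-in realizations are only materialized on first attribute access.
#
#     >>> import astropy.cosmology as cosmology
#     >>> "Planck18" in cosmology.available
#     True
#     >>> cosmology.Planck18.name           # resolved lazily via __getattr__
#     'Planck18'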
# Licensed under a 3-clause BSD style license - see LICENSE.rst import astropy.units as u __all__ = ["Parameter"] class Parameter: r"""Cosmological parameter (descriptor). Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass. Parameters ---------- derived : bool (optional, keyword-only) Whether the Parameter is 'derived', default `False`. Derived parameters behave similarly to normal parameters, but are not sorted by the |Cosmology| signature (probably not there) and are not included in all methods. For reference, see ``Ode0`` in ``FlatFLRWMixin``, which removes :math:`\Omega_{de,0}`` as an independent parameter (:math:`\Omega_{de,0} \equiv 1 - \Omega_{tot}`). unit : unit-like or None (optional, keyword-only) The `~astropy.units.Unit` for the Parameter. If None (default) no unit as assumed. equivalencies : `~astropy.units.Equivalency` or sequence thereof Unit equivalencies for this Parameter. fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only) Function to validate the Parameter value from instances of the cosmology class. If "default", uses default validator to assign units (with equivalencies), if Parameter has units. For other valid string options, see ``Parameter._registry_validators``. 'fvalidate' can also be set through a decorator with :meth:`~astropy.cosmology.Parameter.validator`. fmt : str (optional, keyword-only) `format` specification, used when making string representation of the containing Cosmology. See https://docs.python.org/3/library/string.html#formatspec doc : str or None (optional, keyword-only) Parameter description. Examples -------- For worked examples see :class:`~astropy.cosmology.FLRW`. """ _registry_validators = {} def __init__(self, *, derived=False, unit=None, equivalencies=[], fvalidate="default", fmt="", doc=None): # attribute name on container cosmology class. # really set in __set_name__, but if Parameter is not init'ed as a # descriptor this ensures that the attributes exist. self._attr_name = self._attr_name_private = None self._derived = derived self._fmt = str(fmt) # @property is `format_spec` self.__doc__ = doc # units stuff self._unit = u.Unit(unit) if unit is not None else None self._equivalencies = equivalencies # Parse registered `fvalidate` self._fvalidate_in = fvalidate # Always store input fvalidate. 
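        # The dispatch below resolves ``fvalidate`` three ways: a callable is
        # used as-is, a registered string key is looked up in
        # ``_registry_validators``, and anything else raises. Illustrative
        # sketch (assumes the "default" validator registered later in this
        # module):
        #
        #     >>> Parameter(fvalidate="default").fvalidate  # doctest: +ELLIPSIS
        #     <function _validate_with_unit at ...>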
if callable(fvalidate): pass elif fvalidate in self._registry_validators: fvalidate = self._registry_validators[fvalidate] elif isinstance(fvalidate, str): raise ValueError("`fvalidate`, if str, must be in " f"{self._registry_validators.keys()}") else: raise TypeError("`fvalidate` must be a function or " f"{self._registry_validators.keys()}") self._fvalidate = fvalidate def __set_name__(self, cosmo_cls, name): # attribute name on container cosmology class self._attr_name = name self._attr_name_private = "_" + name @property def name(self): """Parameter name.""" return self._attr_name @property def unit(self): """Parameter unit.""" return self._unit @property def equivalencies(self): """Equivalencies used when initializing Parameter.""" return self._equivalencies @property def format_spec(self): """String format specification.""" return self._fmt @property def derived(self): """Whether the Parameter is derived; true parameters are not.""" return self._derived # ------------------------------------------- # descriptor and property-like methods def __get__(self, cosmology, cosmo_cls=None): # get from class if cosmology is None: return self return getattr(cosmology, self._attr_name_private) def __set__(self, cosmology, value): """Allows attribute setting once. Raises AttributeError subsequently.""" # raise error if setting 2nd time. if hasattr(cosmology, self._attr_name_private): raise AttributeError("can't set attribute") # validate value, generally setting units if present value = self.validate(cosmology, value) setattr(cosmology, self._attr_name_private, value) # ------------------------------------------- # validate value @property def fvalidate(self): """Function to validate a potential value of this Parameter..""" return self._fvalidate def validator(self, fvalidate): """Make new Parameter with custom ``fvalidate``. Note: ``Parameter.fvalidator`` must be the top-most descriptor decorator. Parameters ---------- fvalidate : callable[[type, type, Any], Any] Returns ------- `~astropy.cosmology.Parameter` Copy of this Parameter but with custom ``fvalidate``. """ return self.clone(fvalidate=fvalidate) def validate(self, cosmology, value): """Run the validator on this Parameter. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` instance value : Any The object to validate. Returns ------- Any The output of calling ``fvalidate(cosmology, self, value)`` (yes, that parameter order). """ return self.fvalidate(cosmology, self, value) @classmethod def register_validator(cls, key, fvalidate=None): """Decorator to register a new kind of validator function. Parameters ---------- key : str fvalidate : callable[[object, object, Any], Any] or None, optional Value validation function. Returns ------- ``validator`` or callable[``validator``] if validator is None returns a function that takes and registers a validator. This allows ``register_validator`` to be used as a decorator. """ if key in cls._registry_validators: raise KeyError(f"validator {key!r} already registered with Parameter.") # fvalidate directly passed if fvalidate is not None: cls._registry_validators[key] = fvalidate return fvalidate # for use as a decorator def register(fvalidate): """Register validator function. Parameters ---------- fvalidate : callable[[object, object, Any], Any] Validation function. 
Returns ------- ``validator`` """ cls._registry_validators[key] = fvalidate return fvalidate return register # ------------------------------------------- def _get_init_arguments(self, processed=False): """Initialization arguments. Parameters ---------- processed : bool Whether to more closely reproduce the input arguments (`False`, default) or the processed arguments (`True`). The former is better for string representations and round-tripping with ``eval(repr())``. Returns ------- dict[str, Any] """ # The keys are added in this order because `repr` prints them in order. kw = {"derived": self.derived, "unit": self.unit, "equivalencies": self.equivalencies, # Validator is always turned into a function, but for ``repr`` it's nice # to know if it was originally a string. "fvalidate": self.fvalidate if processed else self._fvalidate_in, "fmt": self.format_spec, "doc": self.__doc__} return kw def clone(self, **kw): """Clone this `Parameter`, changing any constructor argument. Parameters ---------- **kw Passed to constructor. The current values, eg. ``fvalidate`` are used as the default values, so an empty ``**kw`` is an exact copy. Examples -------- >>> p = Parameter() >>> p Parameter(derived=False, unit=None, equivalencies=[], fvalidate='default', fmt='', doc=None) >>> p.clone(unit="km") Parameter(derived=False, unit=Unit("km"), equivalencies=[], fvalidate='default', fmt='', doc=None) """ # Start with defaults, update from kw. kwargs = {**self._get_init_arguments(), **kw} # All initialization failures, like incorrect input are handled by init cloned = type(self)(**kwargs) # Transfer over the __set_name__ stuff. If `clone` is used to make a # new descriptor, __set_name__ will be called again, overwriting this. cloned._attr_name = self._attr_name cloned._attr_name_private = self._attr_name_private return cloned def __eq__(self, other): """Check Parameter equality. Only equal to other Parameter objects. Returns ------- NotImplemented or True `True` if equal, `NotImplemented` otherwise. This allows `other` to be check for equality with ``other.__eq__``. Examples -------- >>> p1, p2 = Parameter(unit="km"), Parameter(unit="km") >>> p1 == p2 True >>> p3 = Parameter(unit="km / s") >>> p3 == p1 False >>> p1 != 2 True """ if not isinstance(other, Parameter): return NotImplemented # Check equality on all `_init_arguments` & `name`. # Need to compare the processed arguments because the inputs are many- # to-one, e.g. `fvalidate` can be a string or the equivalent function. return ((self._get_init_arguments(True) == other._get_init_arguments(True)) and (self.name == other.name)) def __repr__(self): """String representation. ``eval(repr())`` should work, depending if contents like ``fvalidate`` can be similarly round-tripped. """ return "Parameter({})".format(", ".join(f"{k}={v!r}" for k, v in self._get_init_arguments().items())) # =================================================================== # Built-in validators @Parameter.register_validator("default") def _validate_with_unit(cosmology, param, value): """ Default Parameter value validator. Adds/converts units if Parameter has a unit. 
""" if param.unit is not None: with u.add_enabled_equivalencies(param.equivalencies): value = u.Quantity(value, param.unit) return value @Parameter.register_validator("float") def _validate_to_float(cosmology, param, value): """Parameter value validator with units, and converted to float.""" value = _validate_with_unit(cosmology, param, value) return float(value) @Parameter.register_validator("scalar") def _validate_to_scalar(cosmology, param, value): """""" value = _validate_with_unit(cosmology, param, value) if not value.isscalar: raise ValueError(f"{param.name} is a non-scalar quantity") return value @Parameter.register_validator("non-negative") def _validate_non_negative(cosmology, param, value): """Parameter value validator where value is a positive float.""" value = _validate_to_float(cosmology, param, value) if value < 0.0: raise ValueError(f"{param.name} cannot be negative.") return value
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Cosmological units and equivalencies. """ # (newline needed for unit summary) import astropy.units as u from astropy.units.utils import generate_unit_summary as _generate_unit_summary __all__ = ["littleh", "redshift", # redshift equivalencies "dimensionless_redshift", "with_redshift", "redshift_distance", "redshift_hubble", "redshift_temperature", # other equivalencies "with_H0"] __doctest_requires__ = {('with_redshift', 'redshift_distance'): ['scipy']} _ns = globals() ############################################################################### # Cosmological Units # This is not formally a unit, but is used in that way in many contexts, and # an appropriate equivalency is only possible if it's treated as a unit. redshift = u.def_unit(['redshift'], prefixes=False, namespace=_ns, doc="Cosmological redshift.", format={'latex': r''}) # This is not formally a unit, but is used in that way in many contexts, and # an appropriate equivalency is only possible if it's treated as a unit (see # https://arxiv.org/pdf/1308.4150.pdf for more) # Also note that h or h100 or h_100 would be a better name, but they either # conflict or have numbers in them, which is disallowed littleh = u.def_unit(['littleh'], namespace=_ns, prefixes=False, doc='Reduced/"dimensionless" Hubble constant', format={'latex': r'h_{100}'}) ############################################################################### # Equivalencies def dimensionless_redshift(): """Allow redshift to be 1-to-1 equivalent to dimensionless. It is special compared to other equivalency pairs in that it allows this independent of the power to which the redshift is raised, and independent of whether it is part of a more complicated unit. It is similar to u.dimensionless_angles() in this respect. """ return u.Equivalency([(redshift, None)], "dimensionless_redshift") def redshift_distance(cosmology=None, kind="comoving", **atzkw): """Convert quantities between redshift and distance. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). kind : {'comoving', 'lookback', 'luminosity'} or None, optional The distance type for the Equivalency. Note this does NOT include the angular diameter distance as this distance measure is not monotonic. **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and temperature. 
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() allowed_kinds = ('comoving', 'lookback', 'luminosity') if kind not in allowed_kinds: raise ValueError(f"`kind` is not one of {allowed_kinds}") method = getattr(cosmology, kind + "_distance") def z_to_distance(z): """Redshift to distance.""" return method(z) def distance_to_z(d): """Distance to redshift.""" return z_at_value(method, d << u.Mpc, **atzkw) return u.Equivalency([(redshift, u.Mpc, z_to_distance, distance_to_z)], "redshift_distance", {'cosmology': cosmology, "distance": kind}) def redshift_hubble(cosmology=None, **atzkw): """Convert quantities between redshift and Hubble parameter and little-h. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and Hubble parameter and little-h unit. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_hubble(z): """Redshift to Hubble parameter.""" return cosmology.H(z) def hubble_to_z(H): """Hubble parameter to redshift.""" return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw) def z_to_littleh(z): """Redshift to :math:`h`-unit Quantity.""" return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh def littleh_to_z(h): """:math:`h`-unit Quantity to redshift.""" return hubble_to_z(h * 100) return u.Equivalency([(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z), (redshift, littleh, z_to_littleh, littleh_to_z)], "redshift_hubble", {'cosmology': cosmology}) def redshift_temperature(cosmology=None, **atzkw): """Convert quantities between redshift and CMB temperature. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). 
If None, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` Equivalency between redshift and temperature. Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> z = 1100 * cu.redshift >>> z.to(u.K, cu.redshift_temperature(WMAP9)) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology, z_at_value # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() def z_to_Tcmb(z): return cosmology.Tcmb(z) def Tcmb_to_z(T): return z_at_value(cosmology.Tcmb, T << u.K, **atzkw) return u.Equivalency([(redshift, u.K, z_to_Tcmb, Tcmb_to_z)], "redshift_temperature", {'cosmology': cosmology}) def with_redshift(cosmology=None, *, distance="comoving", hubble=True, Tcmb=True, atzkw=None): """Convert quantities between measures of cosmological distance. Note: by default all equivalencies are on and must be explicitly turned off. Care should be taken to not misinterpret a relativistic, gravitational, etc redshift as a cosmological one. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional A cosmology realization or built-in cosmology's name (e.g. 'Planck18'). If `None`, will use the default cosmology (controlled by :class:`~astropy.cosmology.default_cosmology`). distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only) The type of distance equivalency to create or `None`. Default is 'comoving'. hubble : bool (optional, keyword-only) Whether to create a Hubble parameter <-> redshift equivalency, using ``Cosmology.H``. Default is `True`. Tcmb : bool (optional, keyword-only) Whether to create a CMB temperature <-> redshift equivalency, using ``Cosmology.Tcmb``. Default is `True`. atzkw : dict or None (optional, keyword-only) keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- `~astropy.units.equivalencies.Equivalency` With equivalencies between redshift and distance / Hubble / temperature. 
Examples -------- >>> import astropy.units as u >>> import astropy.cosmology.units as cu >>> from astropy.cosmology import WMAP9 >>> equivalency = cu.with_redshift(WMAP9) >>> z = 1100 * cu.redshift Redshift to (comoving) distance: >>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 14004.03157418 Mpc> Redshift to the Hubble parameter: >>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP <Quantity 1565637.40154275 km / (Mpc s)> >>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP <Quantity 15656.37401543 littleh> Redshift to CMB temperature: >>> z.to(u.K, equivalency) <Quantity 3000.225 K> """ from astropy.cosmology import default_cosmology # get cosmology: None -> default and process str / class cosmology = cosmology if cosmology is not None else default_cosmology.get() with default_cosmology.set(cosmology): # if already cosmo, passes through cosmology = default_cosmology.get() atzkw = atzkw if atzkw is not None else {} equivs = [] # will append as built # Hubble <-> Redshift if hubble: equivs.extend(redshift_hubble(cosmology, **atzkw)) # CMB Temperature <-> Redshift if Tcmb: equivs.extend(redshift_temperature(cosmology, **atzkw)) # Distance <-> Redshift, but need to choose which distance if distance is not None: equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw)) # ----------- return u.Equivalency(equivs, "with_redshift", {'cosmology': cosmology, 'distance': distance, 'hubble': hubble, 'Tcmb': Tcmb}) # =================================================================== def with_H0(H0=None): """ Convert between quantities with little-h and the equivalent physical units. Parameters ---------- H0 : None or `~astropy.units.Quantity` ['frequency'] The value of the Hubble constant to assume. If a `~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If `None` (default), use the ``H0`` attribute from :mod:`~astropy.cosmology.default_cosmology`. References ---------- For an illuminating discussion on why you may or may not want to use little-h at all, see https://arxiv.org/pdf/1308.4150.pdf """ if H0 is None: from .realizations import default_cosmology H0 = default_cosmology.get().H0 h100_val_unit = u.Unit(100/(H0.to_value(u.km / u.s / u.Mpc)) * littleh) return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0}) # =================================================================== # Enable the set of default equivalencies. # If the cosmology package is imported, this is added to the list astropy-wide. u.add_enabled_equivalencies(dimensionless_redshift()) # ============================================================================= # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. if __doc__ is not None: __doc__ += _generate_unit_summary(_ns)
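# ``with_H0`` usage sketch (the H0 value is illustrative): converting a
# little-h distance to a physical one divides by h = H0 / 100.
#
#     >>> import astropy.units as u
#     >>> H0 = 70 * (u.km / u.s / u.Mpc)
#     >>> (100 * u.Mpc / littleh).to(u.Mpc, with_H0(H0))  # doctest: +FLOAT_CMP
#     <Quantity 142.85714286 Mpc>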
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import pathlib import sys # LOCAL from astropy.utils.data import get_pkg_data_path from astropy.utils.decorators import deprecated from astropy.utils.state import ScienceState from .core import Cosmology _COSMOLOGY_DATA_DIR = pathlib.Path(get_pkg_data_path("cosmology", "data", package="astropy")) available = tuple(sorted([p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv")])) __all__ = ["available", "default_cosmology"] + list(available) __doctest_requires__ = {"*": ["scipy"]} def __getattr__(name): """Make specific realizations from data files with lazy import from `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_. Raises ------ AttributeError If "name" is not in :mod:`astropy.cosmology.realizations` """ if name not in available: raise AttributeError(f"module {__name__!r} has no attribute {name!r}.") cosmo = Cosmology.read(str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv") cosmo.__doc__ = (f"{name} instance of {cosmo.__class__.__qualname__} " f"cosmology\n(from {cosmo.meta['reference']})") # Cache in this module so `__getattr__` is only called once per `name`. setattr(sys.modules[__name__], name, cosmo) return cosmo def __dir__(): """Directory, including lazily-imported objects.""" return __all__ ######################################################################### # The science state below contains the current cosmology. ######################################################################### class default_cosmology(ScienceState): """The default cosmology to use. To change it:: >>> from astropy.cosmology import default_cosmology, WMAP7 >>> with default_cosmology.set(WMAP7): ... # WMAP7 cosmology in effect ... pass Or, you may use a string:: >>> with default_cosmology.set('WMAP7'): ... # WMAP7 cosmology in effect ... pass To get the default cosmology: >>> default_cosmology.get() FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ... To get a specific cosmology: >>> default_cosmology.get("Planck13") FlatLambdaCDM(name="Planck13", H0=67.77 km / (Mpc s), Om0=0.30712, ... """ _default_value = "Planck18" _value = "Planck18" @classmethod def get(cls, key=None): """Get the science state value of ``key``. Parameters ---------- key : str or None The built-in |Cosmology| realization to retrieve. If None (default) get the current value. Returns ------- `astropy.cosmology.Cosmology` or None `None` only if ``key`` is "no_default" Raises ------ TypeError If ``key`` is not a str, |Cosmology|, or None. ValueError If ``key`` is a str, but not for a built-in Cosmology Examples -------- To get the default cosmology: >>> default_cosmology.get() FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ... To get a specific cosmology: >>> default_cosmology.get("Planck13") FlatLambdaCDM(name="Planck13", H0=67.77 km / (Mpc s), Om0=0.30712, ... """ if key is None: key = cls._value if isinstance(key, str): # special-case one string if key == "no_default": return None # all other options should be built-in realizations try: value = getattr(sys.modules[__name__], key) except AttributeError: raise ValueError(f"Unknown cosmology {key!r}. 
" f"Valid cosmologies:\n{available}") elif isinstance(key, Cosmology): value = key else: raise TypeError("'key' must be must be None, a string, " f"or Cosmology instance, not {type(key)}.") # validate value to `Cosmology`, if not already return cls.validate(value) @deprecated("5.0", alternative="get") @classmethod def get_cosmology_from_string(cls, arg): """Return a cosmology instance from a string.""" return cls.get(arg) @classmethod def validate(cls, value): """Return a Cosmology given a value. Parameters ---------- value : None, str, or `~astropy.cosmology.Cosmology` Returns ------- `~astropy.cosmology.Cosmology` instance Raises ------ TypeError If ``value`` is not a string or |Cosmology|. """ # None -> default if value is None: value = cls._default_value # Parse to Cosmology. Error if cannot. if isinstance(value, str): value = cls.get(value) elif not isinstance(value, Cosmology): raise TypeError("default_cosmology must be a string or Cosmology instance, " f"not {value}.") return value
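# The test module below exercises ``astropy.table`` join/stack operations.
# A minimal sketch of the basic behavior under test (toy data, outputs
# checked structurally rather than by formatted printout):
#
#     >>> from astropy import table
#     >>> t1 = table.Table({'a': [1, 2], 'b': ['x', 'y']})
#     >>> t2 = table.Table({'a': [1, 3], 'c': ['p', 'q']})
#     >>> t12 = table.join(t1, t2)          # inner join on common key 'a'
#     >>> t12.colnames, len(t12)
#     (['a', 'b', 'c'], 1)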
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.table.table_helpers import ArrayWrapper from astropy.coordinates.earth import EarthLocation from astropy.units.quantity import Quantity from collections import OrderedDict from contextlib import nullcontext import pytest import numpy as np from astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn, NdarrayMixin from astropy.table.operations import _get_out_class, join_skycoord, join_distance from astropy import units as u from astropy.utils import metadata from astropy.utils.metadata import MergeConflictError from astropy import table from astropy.time import Time, TimeDelta from astropy.coordinates import (SkyCoord, SphericalRepresentation, UnitSphericalRepresentation, CartesianRepresentation, BaseRepresentationOrDifferential, search_around_3d) from astropy.coordinates.tests.test_representation import representation_equal from astropy.coordinates.tests.helper import skycoord_equal from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa def sort_eq(list1, list2): return sorted(list1) == sorted(list2) def check_mask(col, exp_mask): """Check that col.mask == exp_mask""" if hasattr(col, 'mask'): # Coerce expected mask into dtype of col.mask. In particular this is # needed for types like EarthLocation where the mask is a structured # array. exp_mask = np.array(exp_mask).astype(col.mask.dtype) out = np.all(col.mask == exp_mask) else: # With no mask the check is OK if all the expected mask values # are False (i.e. no auto-conversion to MaskedQuantity if it was # not required by the join). out = np.all(exp_mask == False) return out class TestJoin(): def _setup(self, t_cls=Table): lines1 = [' a b c ', ' 0 foo L1', ' 1 foo L2', ' 1 bar L3', ' 2 bar L4'] lines2 = [' a b d ', ' 1 foo R1', ' 1 foo R2', ' 2 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls(self.t2, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]), ('c', {'a': 1, 'b': 1}), ('d', 1), ('a', 1)]) def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.join(self.t1, self.t2, join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join(self.t1, self.t3, join_type='inner') assert len(w) == 3 assert out.meta == self.t3.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn') assert len(w) == 3 assert out.meta == self.t3.meta out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent') assert out.meta == self.t3.meta with pytest.raises(MergeConflictError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense') def test_both_unmasked_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Basic join with default parameters (inner join on common keys) t12 = table.join(t1, t2) assert type(t12) is operation_table_type assert 
type(t12['a']) is type(t1['a']) # noqa assert type(t12['b']) is type(t1['b']) # noqa assert type(t12['c']) is type(t1['c']) # noqa assert type(t12['d']) is type(t2['d']) # noqa assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Table meta merged properly assert t12.meta == self.meta_merge def test_both_unmasked_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left') assert t12.has_masked_columns is True assert t12.masked is False for name in ('a', 'b', 'c'): assert type(t12[name]) is Column assert type(t12['d']) is MaskedColumn assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3']) # Right join t12 = table.join(t1, t2, join_type='right') assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Outer join t12 = table.join(t1, t2, join_type='outer') assert t12.has_masked_columns is True assert t12.masked is False assert sort_eq(t12.pformat(), [' a b c d ', '--- --- --- ---', ' 0 foo L1 --', ' 1 bar L3 --', ' 1 foo L2 R1', ' 1 foo L2 R2', ' 2 bar L4 R3', ' 4 bar -- R4']) # Check that the common keys are 'a', 'b' t12a = table.join(t1, t2, join_type='outer') t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b']) assert np.all(t12a.as_array() == t12b.as_array()) def test_both_unmasked_single_key_inner(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Inner join on 'a' column t12 = table.join(t1, t2, keys='a') assert type(t12) is operation_table_type assert type(t12['a']) is type(t1['a']) # noqa assert type(t12['b_1']) is type(t1['b']) # noqa assert type(t12['c']) is type(t1['c']) # noqa assert type(t12['b_2']) is type(t2['b']) # noqa assert type(t12['d']) is type(t2['d']) # noqa assert t12.masked is False assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) def test_both_unmasked_single_key_left_right_outer(self, operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 # Left join t12 = table.join(t1, t2, join_type='left', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3']) # Right join t12 = table.join(t1, t2, join_type='right', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) # Outer join t12 = table.join(t1, t2, join_type='outer', keys='a') assert t12.has_masked_columns is True assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 0 foo L1 -- --', ' 1 foo L2 foo R1', ' 1 foo L2 foo R2', ' 1 bar L3 foo R1', ' 1 bar L3 foo R2', ' 2 bar L4 bar R3', ' 4 -- -- bar R4']) def test_masked_unmasked(self, 
operation_table_type): if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') self._setup(operation_table_type) t1 = self.t1 t1m = operation_table_type(self.t1, masked=True) t2 = self.t2 # Result table is never masked t1m2 = table.join(t1m, t2, join_type='inner') assert t1m2.masked is False # Result should match non-masked result t12 = table.join(t1, t2) assert np.all(t12.as_array() == np.array(t1m2)) # Mask out some values in left table and make sure they propagate t1m['b'].mask[1] = True t1m['c'].mask[2] = True t1m2 = table.join(t1m, t2, join_type='inner', keys='a') assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 -- L2 foo R1', ' 1 -- L2 foo R2', ' 1 bar -- foo R1', ' 1 bar -- foo R2', ' 2 bar L4 bar R3']) t21m = table.join(t2, t1m, join_type='inner', keys='a') assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ', '--- --- --- --- ---', ' 1 foo R2 -- L2', ' 1 foo R2 bar --', ' 1 foo R1 -- L2', ' 1 foo R1 bar --', ' 2 bar R3 bar L4']) def test_masked_masked(self, operation_table_type): self._setup(operation_table_type) """Two masked tables""" if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') t1 = self.t1 t1m = operation_table_type(self.t1, masked=True) t2 = self.t2 t2m = operation_table_type(self.t2, masked=True) # Result table is never masked but original column types are preserved t1m2m = table.join(t1m, t2m, join_type='inner') assert t1m2m.masked is False for col in t1m2m.itercols(): assert type(col) is MaskedColumn # Result should match non-masked result t12 = table.join(t1, t2) assert np.all(t12.as_array() == np.array(t1m2m)) # Mask out some values in both tables and make sure they propagate t1m['b'].mask[1] = True t1m['c'].mask[2] = True t2m['d'].mask[2] = True t1m2m = table.join(t1m, t2m, join_type='inner', keys='a') assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ', '--- --- --- --- ---', ' 1 -- L2 foo R1', ' 1 -- L2 foo R2', ' 1 bar -- foo R1', ' 1 bar -- foo R2', ' 2 bar L4 bar --']) def test_classes(self): """Ensure that classes and subclasses get through as expected""" class MyCol(Column): pass class MyMaskedCol(MaskedColumn): pass t1 = Table() t1['a'] = MyCol([1]) t1['b'] = MyCol([2]) t1['c'] = MyMaskedCol([3]) t2 = Table() t2['a'] = Column([1, 2]) t2['d'] = MyCol([3, 4]) t2['e'] = MyMaskedCol([5, 6]) t12 = table.join(t1, t2, join_type='inner') for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol), ('d', MyCol), ('e', MyMaskedCol)): assert type(t12[name] is exp_type) t21 = table.join(t2, t1, join_type='left') # Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be # masked, but col 'c' stays since MyMaskedCol supports masking. for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol), ('d', MyCol), ('e', MyMaskedCol)): assert type(t21[name] is exp_type) def test_col_rename(self, operation_table_type): self._setup(operation_table_type) """ Test auto col renaming when there is a conflict. Use non-default values of uniq_col_name and table_names. 
""" t1 = self.t1 t2 = self.t2 t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y', table_names=['L', 'R'], keys='a') assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd'] def test_rename_conflict(self, operation_table_type): self._setup(operation_table_type) """ Test that auto-column rename fails because of a conflict with an existing column """ t1 = self.t1 t2 = self.t2 t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename with pytest.raises(TableMergeError): table.join(t1, t2, keys='a') def test_missing_keys(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that doesn't exist""" t1 = self.t1 t2 = self.t2 with pytest.raises(TableMergeError): table.join(t1, t2, keys=['a', 'not there']) def test_bad_join_type(self, operation_table_type): self._setup(operation_table_type) """Bad join_type input""" t1 = self.t1 t2 = self.t2 with pytest.raises(ValueError): table.join(t1, t2, join_type='illegal value') def test_no_common_keys(self, operation_table_type): self._setup(operation_table_type) """Merge tables with no common keys""" t1 = self.t1 t2 = self.t2 del t1['a'] del t1['b'] del t2['a'] del t2['b'] with pytest.raises(TableMergeError): table.join(t1, t2) def test_masked_key_column(self, operation_table_type): self._setup(operation_table_type) """Merge on a key column that has a masked element""" if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') t1 = self.t1 t2 = operation_table_type(self.t2, masked=True) table.join(t1, t2) # OK t2['a'].mask[0] = True with pytest.raises(TableMergeError): table.join(t1, t2) def test_col_meta_merge(self, operation_table_type): self._setup(operation_table_type) t1 = self.t1 t2 = self.t2 t2.rename_column('d', 'c') # force col conflict and renaming meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]) meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]) # Key col 'a', should first value ('cm') t1['a'].unit = 'cm' t2['a'].unit = 'm' # Key col 'b', take first value 't1_b' t1['b'].info.description = 't1_b' # Key col 'b', take first non-empty value 't1_b' t2['b'].info.format = '%6s' # Key col 'a', should be merged meta t1['a'].info.meta = meta1 t2['a'].info.meta = meta2 # Key col 'b', should be meta2 t2['b'].info.meta = meta2 # All these should pass through t1['c'].info.format = '%3s' t1['c'].info.description = 't1_c' t2['c'].info.format = '%6s' t2['c'].info.description = 't2_c' if operation_table_type is Table: ctx = pytest.warns(metadata.MergeConflictWarning, match=r"In merged column 'a' the 'unit' attribute does not match \(cm != m\)") # noqa else: ctx = nullcontext() with ctx: t12 = table.join(t1, t2, keys=['a', 'b']) assert t12['a'].unit == 'm' assert t12['b'].info.description == 't1_b' assert t12['b'].info.format == '%6s' assert t12['a'].info.meta == self.meta_merge assert t12['b'].info.meta == meta2 assert t12['c_1'].info.format == '%3s' assert t12['c_1'].info.description == 't1_c' assert t12['c_2'].info.format == '%6s' assert t12['c_2'].info.description == 't2_c' def test_join_multidimensional(self, operation_table_type): self._setup(operation_table_type) # Regression test for #2984, which was an issue where join did not work # on multi-dimensional columns. 
t1 = operation_table_type() t1['a'] = [1, 2, 3] t1['b'] = np.ones((3, 4)) t2 = operation_table_type() t2['a'] = [1, 2, 3] t2['c'] = [4, 5, 6] t3 = table.join(t1, t2) np.testing.assert_allclose(t3['a'], t1['a']) np.testing.assert_allclose(t3['b'], t1['b']) np.testing.assert_allclose(t3['c'], t2['c']) def test_join_multidimensional_masked(self, operation_table_type): self._setup(operation_table_type) """ Test for outer join with multidimensional columns where masking is required. (Issue #4059). """ if operation_table_type is QTable: pytest.xfail('Quantity columns do not support masking.') a = table.MaskedColumn([1, 2, 3], name='a') a2 = table.Column([1, 3, 4], name='a') b = table.MaskedColumn([[1, 2], [3, 4], [5, 6]], name='b', mask=[[1, 0], [0, 1], [0, 0]]) c = table.Column([[1, 1], [2, 2], [3, 3]], name='c') t1 = operation_table_type([a, b]) t2 = operation_table_type([a2, c]) t12 = table.join(t1, t2, join_type='inner') assert np.all(t12['b'].mask == [[True, False], [False, False]]) assert not hasattr(t12['c'], 'mask') t12 = table.join(t1, t2, join_type='outer') assert np.all(t12['b'].mask == [[True, False], [False, True], [False, False], [True, True]]) assert np.all(t12['c'].mask == [[False, False], [True, True], [False, False], [False, False]]) def test_mixin_functionality(self, mixin_cols): col = mixin_cols['m'] cls_name = type(col).__name__ len_col = len(col) idx = np.arange(len_col) t1 = table.QTable([idx, col], names=['idx', 'm1']) t2 = table.QTable([idx, col], names=['idx', 'm2']) # Set up join mismatches for different join_type cases t1 = t1[[0, 1, 3]] t2 = t2[[0, 2, 3]] # Test inner join, which works for all mixin_cols out = table.join(t1, t2, join_type='inner') assert len(out) == 2 assert out['m2'].__class__ is col.__class__ assert np.all(out['idx'] == [0, 3]) if cls_name == 'SkyCoord': # SkyCoord doesn't support __eq__ so use our own assert skycoord_equal(out['m1'], col[[0, 3]]) assert skycoord_equal(out['m2'], col[[0, 3]]) elif 'Repr' in cls_name or 'Diff' in cls_name: assert np.all(representation_equal(out['m1'], col[[0, 3]])) assert np.all(representation_equal(out['m2'], col[[0, 3]])) else: assert np.all(out['m1'] == col[[0, 3]]) assert np.all(out['m2'] == col[[0, 3]]) # Check for left, right, outer join which requires masking. Works for # the listed mixins classes. 
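        # Only column types that can represent missing entries take the
        # branch below; an illustrative sketch of the masked outer join being
        # checked (toy Quantity columns; mask layout assumes rows sorted by
        # 'idx'):
        #
        #     >>> from astropy import table, units as u
        #     >>> t1 = table.QTable({'idx': [0, 1], 'm1': [1., 2.] * u.m})
        #     >>> t2 = table.QTable({'idx': [0, 2], 'm2': [1., 3.] * u.m})
        #     >>> out = table.join(t1, t2, join_type='outer')
        #     >>> bool(out['m1'].mask[2]), bool(out['m2'].mask[1])
        #     (True, True)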
if isinstance(col, (Quantity, Time, TimeDelta)): out = table.join(t1, t2, join_type='left') assert len(out) == 3 assert np.all(out['idx'] == [0, 1, 3]) assert np.all(out['m1'] == t1['m1']) assert np.all(out['m2'] == t2['m2']) check_mask(out['m1'], [False, False, False]) check_mask(out['m2'], [False, True, False]) out = table.join(t1, t2, join_type='right') assert len(out) == 3 assert np.all(out['idx'] == [0, 2, 3]) assert np.all(out['m1'] == t1['m1']) assert np.all(out['m2'] == t2['m2']) check_mask(out['m1'], [False, True, False]) check_mask(out['m2'], [False, False, False]) out = table.join(t1, t2, join_type='outer') assert len(out) == 4 assert np.all(out['idx'] == [0, 1, 2, 3]) assert np.all(out['m1'] == col) assert np.all(out['m2'] == col) assert check_mask(out['m1'], [False, False, True, False]) assert check_mask(out['m2'], [False, True, False, False]) else: # Otherwise make sure it fails with the right exception message for join_type in ('outer', 'left', 'right'): with pytest.raises(NotImplementedError) as err: table.join(t1, t2, join_type=join_type) assert ('join requires masking' in str(err.value) or 'join unavailable' in str(err.value)) def test_cartesian_join(self, operation_table_type): t1 = Table(rows=[(1, 'a'), (2, 'b')], names=['a', 'b']) t2 = Table(rows=[(3, 'c'), (4, 'd')], names=['a', 'c']) t12 = table.join(t1, t2, join_type='cartesian') assert t1.colnames == ['a', 'b'] assert t2.colnames == ['a', 'c'] assert len(t12) == len(t1) * len(t2) assert str(t12).splitlines() == [ 'a_1 b a_2 c ', '--- --- --- ---', ' 1 a 3 c', ' 1 a 4 d', ' 2 b 3 c', ' 2 b 4 d'] with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'): t12 = table.join(t1, t2, join_type='cartesian', keys='a') @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_skycoord_sky(self): sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') t1 = Table([sc1], names=['sc']) t2 = Table([sc2], names=['sc']) t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) exp = ['sc_id sc_1 sc_2 ', ' deg,deg deg,deg ', '----- ------- --------', ' 1 1.0,0.0 1.05,0.0', ' 1 1.1,0.0 1.05,0.0', ' 2 2.0,0.0 2.1,0.0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') @pytest.mark.parametrize('distance_func', ['search_around_3d', search_around_3d]) def test_join_with_join_skycoord_3d(self, distance_func): sc1 = SkyCoord([0, 1, 1.1, 2]*u.deg, [0, 0, 0, 0]*u.deg, [1, 1, 2, 1]*u.m) sc2 = SkyCoord([0.5, 1.05, 2.1]*u.deg, [0, 0, 0]*u.deg, [1, 1, 1]*u.m) t1 = Table([sc1], names=['sc']) t2 = Table([sc2], names=['sc']) join_func = join_skycoord(np.deg2rad(0.2) * u.m, distance_func=distance_func) t12 = table.join(t1, t2, join_funcs={'sc': join_func}) exp = ['sc_id sc_1 sc_2 ', ' deg,deg,m deg,deg,m ', '----- ----------- ------------', ' 1 1.0,0.0,1.0 1.05,0.0,1.0', ' 2 2.0,0.0,1.0 2.1,0.0,1.0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d(self): c1 = [0, 1, 1.1, 2] c2 = [0.5, 1.05, 2.1] t1 = Table([c1], names=['col']) t2 = Table([c2], names=['col']) join_func = join_distance(0.2, kdtree_args={'leafsize': 32}, query_args={'p': 2}) t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func}) exp = ['col_id col_1 col_2', '------ ----- -----', ' 1 1.0 1.05', ' 1 1.1 1.05', ' 2 2.0 2.1', ' 3 0.0 --', ' 4 -- 0.5'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d_multikey(self): from 
astropy.table.operations import _apply_join_funcs c1 = [0, 1, 1.1, 1.2, 2] id1 = [0, 1, 2, 2, 3] o1 = ['a', 'b', 'c', 'd', 'e'] c2 = [0.5, 1.05, 2.1] id2 = [0, 2, 4] o2 = ['z', 'y', 'x'] t1 = Table([c1, id1, o1], names=['col', 'id', 'o1']) t2 = Table([c2, id2, o2], names=['col', 'id', 'o2']) join_func = join_distance(0.2) join_funcs = {'col': join_func} t12 = table.join(t1, t2, join_type='outer', join_funcs=join_funcs) exp = ['col_id col_1 id o1 col_2 o2', '------ ----- --- --- ----- ---', ' 1 1.0 1 b -- --', ' 1 1.1 2 c 1.05 y', ' 1 1.2 2 d 1.05 y', ' 2 2.0 3 e -- --', ' 2 -- 4 -- 2.1 x', ' 3 0.0 0 a -- --', ' 4 -- 0 -- 0.5 z'] assert str(t12).splitlines() == exp left, right, keys = _apply_join_funcs(t1, t2, ('col', 'id'), join_funcs) assert keys == ('col_id', 'id') @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_1d_quantity(self): c1 = [0, 1, 1.1, 2] * u.m c2 = [500, 1050, 2100] * u.mm t1 = QTable([c1], names=['col']) t2 = QTable([c2], names=['col']) join_func = join_distance(20 * u.cm) t12 = table.join(t1, t2, join_funcs={'col': join_func}) exp = ['col_id col_1 col_2 ', ' m mm ', '------ ----- ------', ' 1 1.0 1050.0', ' 1 1.1 1050.0', ' 2 2.0 2100.0'] assert str(t12).splitlines() == exp # Generate column name conflict t2['col_id'] = [0, 0, 0] t2['col__id'] = [0, 0, 0] t12 = table.join(t1, t2, join_funcs={'col': join_func}) exp = ['col___id col_1 col_2 col_id col__id', ' m mm ', '-------- ----- ------ ------ -------', ' 1 1.0 1050.0 0 0', ' 1 1.1 1050.0 0 0', ' 2 2.0 2100.0 0 0'] assert str(t12).splitlines() == exp @pytest.mark.skipif('not HAS_SCIPY') def test_join_with_join_distance_2d(self): c1 = np.array([[0, 1, 1.1, 2], [0, 0, 1, 0]]).transpose() c2 = np.array([[0.5, 1.05, 2.1], [0, 0, 0]]).transpose() t1 = Table([c1], names=['col']) t2 = Table([c2], names=['col']) join_func = join_distance(0.2, kdtree_args={'leafsize': 32}, query_args={'p': 2}) t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func}) exp = ['col_id col_1 [2] col_2 [2] ', '------ ---------- -----------', ' 1 1.0 .. 0.0 1.05 .. 0.0', ' 2 2.0 .. 0.0 2.1 .. 0.0', ' 3 0.0 .. 0.0 -- .. --', ' 4 1.1 .. 1.0 -- .. --', ' 5 -- .. -- 0.5 .. 0.0'] assert str(t12).splitlines() == exp def test_keys_left_right_basic(self): """Test using the keys_left and keys_right args to specify different join keys. This takes the standard test case but renames column 'a' to 'x' and 'y' respectively for tables 1 and 2. 
Then it compares the normal join on 'a' to the new join on 'x' and 'y'.""" self._setup() for join_type in ('inner', 'left', 'right', 'outer'): t1 = self.t1.copy() t2 = self.t2.copy() # Expected is same as joining on 'a' but with names 'x', 'y' instead t12_exp = table.join(t1, t2, keys='a', join_type=join_type) t12_exp.add_column(t12_exp['a'], name='x', index=1) t12_exp.add_column(t12_exp['a'], name='y', index=len(t1.colnames) + 1) del t12_exp['a'] # Different key names t1.rename_column('a', 'x') t2.rename_column('a', 'y') keys_left_list = ['x'] # Test string key name keys_right_list = [['y']] # Test list of string key names if join_type == 'outer': # Just do this for the outer join (others are the same) keys_left_list.append([t1['x'].tolist()]) # Test list key column keys_right_list.append([t2['y']]) # Test Column key column for keys_left, keys_right in zip(keys_left_list, keys_right_list): t12 = table.join(t1, t2, keys_left=keys_left, keys_right=keys_right, join_type=join_type) assert t12.colnames == t12_exp.colnames for col in t12.values_equal(t12_exp).itercols(): assert np.all(col) assert t12_exp.meta == t12.meta def test_keys_left_right_exceptions(self): """Test exceptions using the keys_left and keys_right args to specify different join keys. """ self._setup() t1 = self.t1 t2 = self.t2 msg = r"left table does not have key column 'z'" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left='z', keys_right=['a']) msg = r"left table has different length from key \[1, 2\]" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=[[1, 2]], keys_right=['a']) msg = r"keys arg must be None if keys_left and keys_right are supplied" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left='z', keys_right=['a'], keys='a') msg = r"keys_left and keys_right args must have same length" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a', 'b'], keys_right=['a']) msg = r"keys_left and keys_right must both be provided" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a', 'b']) msg = r"cannot supply join_funcs arg and keys_left / keys_right" with pytest.raises(ValueError, match=msg): table.join(t1, t2, keys_left=['a'], keys_right=['a'], join_funcs={}) class TestSetdiff(): def _setup(self, t_cls=Table): lines1 = [' a b ', ' 0 foo ', ' 1 foo ', ' 1 bar ', ' 2 bar '] lines2 = [' a b ', ' 0 foo ', ' 3 foo ', ' 4 bar ', ' 2 bar '] lines3 = [' a b d ', ' 0 foo R1', ' 8 foo R2', ' 1 bar R3', ' 4 bar R4'] self.t1 = t_cls.read(lines1, format='ascii') self.t2 = t_cls.read(lines2, format='ascii') self.t3 = t_cls.read(lines3, format='ascii') def test_default_same_columns(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t2) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', ' 1 bar', ' 1 foo'] def test_default_same_tables(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t1) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---'] def test_extra_col_left_table(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1) def test_extra_col_right_table(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t1, self.t3) assert type(out['a']) is 
type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', ' 1 foo', ' 2 bar'] def test_keys(self, operation_table_type): self._setup(operation_table_type) out = table.setdiff(self.t3, self.t1, keys=['a', 'b']) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b d ', '--- --- ---', ' 4 bar R4', ' 8 foo R2'] def test_missing_key(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.setdiff(self.t3, self.t1, keys=['a', 'd']) class TestVStack(): def _setup(self, t_cls=Table): self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii') self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii') self.t3 = t_cls.read([' a b', ' 4. 7', ' 5. 8', ' 6. 9'], format='ascii') self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table) # The following table has meta-data that conflicts with t1 self.t5 = t_cls(self.t1, copy=True) self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])) self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])) self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)])) self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)])) self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]), ('c', {'a': 1, 'b': 1, 'c': 1}), ('d', 1), ('a', 1), ('e', 1)]) def test_validate_join_type(self): self._setup() with pytest.raises(TypeError, match='Did you accidentally call vstack'): table.vstack(self.t1, self.t2) def test_stack_rows(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2[1]]) assert type(out['a']) is type(self.t1['a']) # noqa assert type(out['b']) is type(self.t1['b']) # noqa assert out.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '1.0 bar'] def test_stack_table_column(self, operation_table_type): self._setup(operation_table_type) t2 = self.t1.copy() t2.meta.clear() out = table.vstack([self.t1, t2['a']]) assert out.masked is False assert out.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar', '0.0 --', '1.0 --'] def test_table_meta_merge(self, operation_table_type): self._setup(operation_table_type) out = table.vstack([self.t1, self.t2, self.t4], join_type='inner') assert out.meta == self.meta_merge def test_table_meta_merge_conflict(self, operation_table_type): self._setup(operation_table_type) with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack([self.t1, self.t5], join_type='inner') assert len(w) == 2 assert out.meta == self.t5.meta with pytest.warns(metadata.MergeConflictWarning) as w: out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn') assert len(w) == 2 assert out.meta == self.t5.meta out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent') assert out.meta == self.t5.meta with pytest.raises(MergeConflictError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error') with pytest.raises(ValueError): out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense') def test_bad_input_type(self, operation_table_type): self._setup(operation_table_type) with pytest.raises(ValueError): table.vstack([]) with pytest.raises(TypeError): table.vstack(1) with pytest.raises(TypeError): table.vstack([self.t2, 1]) with pytest.raises(ValueError): table.vstack([self.t1, self.t2], 

    def test_stack_basic_inner(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        t12 = table.vstack([t1, t2], join_type='inner')
        assert t12.masked is False
        assert type(t12) is operation_table_type
        assert type(t12['a']) is type(t1['a'])  # noqa
        assert type(t12['b']) is type(t1['b'])  # noqa
        assert t12.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar',
                                 '2.0 pez', '3.0 sez']

        t124 = table.vstack([t1, t2, t4], join_type='inner')
        assert type(t124) is operation_table_type
        assert type(t12['a']) is type(t1['a'])  # noqa
        assert type(t12['b']) is type(t1['b'])  # noqa
        assert t124.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar',
                                  '2.0 pez', '3.0 sez', '0.0 foo', '1.0 bar']

    def test_stack_basic_outer(self, operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4
        t12 = table.vstack([t1, t2], join_type='outer')
        assert t12.masked is False
        assert t12.pformat() == [' a b c ', '--- --- ---', '0.0 foo --',
                                 '1.0 bar --', '2.0 pez 4', '3.0 sez 5']

        t124 = table.vstack([t1, t2, t4], join_type='outer')
        assert t124.masked is False
        assert t124.pformat() == [' a b c ', '--- --- ---', '0.0 foo --',
                                  '1.0 bar --', '2.0 pez 4', '3.0 sez 5',
                                  '0.0 foo --', '1.0 bar --']

    def test_stack_incompatible(self, operation_table_type):
        self._setup(operation_table_type)
        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, self.t3], join_type='inner')
        assert ("The 'b' columns have incompatible types: {}"
                .format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
                in str(excinfo.value))

        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, self.t3], join_type='outer')
        assert "The 'b' columns have incompatible types:" in str(excinfo.value)

        with pytest.raises(TableMergeError):
            table.vstack([self.t1, self.t2], join_type='exact')

        t1_reshape = self.t1.copy()
        t1_reshape['b'].shape = [2, 1]
        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, t1_reshape])
        assert "have different shape" in str(excinfo.value)

    def test_vstack_one_masked(self, operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t4 = self.t4
        t4['b'].mask[1] = True
        t14 = table.vstack([t1, t4])
        assert t14.masked is False
        assert t14.pformat() == [' a b ', '--- ---', '0.0 foo', '1.0 bar',
                                 '0.0 foo', '1.0 --']
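
    # For vstack (as the tests above show): join_type='inner' keeps only the
    # columns common to all inputs, 'outer' keeps the union and masks the
    # missing values, and 'exact' requires identical column sets, raising
    # TableMergeError otherwise.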

    def test_col_meta_merge_inner(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        # Key col 'a' should take the last unit value ('km')
        t1['a'].info.unit = 'cm'
        t2['a'].info.unit = 'm'
        t4['a'].info.unit = 'km'

        # Key col 'a' format should take last when all match
        t1['a'].info.format = '%f'
        t2['a'].info.format = '%f'
        t4['a'].info.format = '%f'

        # Key col 'b', take first value 't1_b'
        t1['b'].info.description = 't1_b'

        # Key col 'b', take first non-empty value '%6s'
        t4['b'].info.format = '%6s'

        # Key col 'a', should be merged meta
        t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))

        # Key col 'b', should be meta2
        t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))

        if operation_table_type is Table:
            ctx = pytest.warns(metadata.MergeConflictWarning)
        else:
            ctx = nullcontext()

        with ctx as warning_lines:
            out = table.vstack([t1, t2, t4], join_type='inner')

        if operation_table_type is Table:
            assert len(warning_lines) == 2
            assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
                    in str(warning_lines[0].message))
            assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
                    in str(warning_lines[1].message))
            # Check units are suitably ignored for a regular Table
            assert out.pformat() == [' a b ', ' km ', '-------- ------',
                                     '0.000000 foo', '1.000000 bar',
                                     '2.000000 pez', '3.000000 sez',
                                     '0.000000 foo', '1.000000 bar']
        else:
            # Check QTable correctly dealt with units.
            assert out.pformat() == [' a b ', ' km ', '-------- ------',
                                     '0.000000 foo', '0.000010 bar',
                                     '0.002000 pez', '0.003000 sez',
                                     '0.000000 foo', '1.000000 bar']
        assert out['a'].info.unit == 'km'
        assert out['a'].info.format == '%f'
        assert out['b'].info.description == 't1_b'
        assert out['b'].info.format == '%6s'
        assert out['a'].info.meta == self.meta_merge
        assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])

    def test_col_meta_merge_outer(self, operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        # Key col 'a' should take the last unit value ('km')
        t1['a'].unit = 'cm'
        t2['a'].unit = 'm'
        t4['a'].unit = 'km'

        # Key col 'a' format should take last when all match
        t1['a'].info.format = '%0d'
        t2['a'].info.format = '%0d'
        t4['a'].info.format = '%0d'

        # Key col 'b', take first value 't1_b'
        t1['b'].info.description = 't1_b'

        # Key col 'b', take first non-empty value '%6s'
        t4['b'].info.format = '%6s'

        # Key col 'a', should be merged meta
        t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))

        # Key col 'b', should be meta2
        t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))

        # All these should pass through
        t2['c'].unit = 'm'
        t2['c'].info.format = '%6s'
        t2['c'].info.description = 't2_c'

        with pytest.warns(metadata.MergeConflictWarning) as warning_lines:
            out = table.vstack([t1, t2, t4], join_type='outer')

        assert len(warning_lines) == 2
        assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
                in str(warning_lines[0].message))
        assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
                in str(warning_lines[1].message))
        assert out['a'].unit == 'km'
        assert out['a'].info.format == '%0d'
        assert out['b'].info.description == 't1_b'
        assert out['b'].info.format == '%6s'
        assert out['a'].info.meta == self.meta_merge
        assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        assert out['c'].info.unit == 'm'
        assert out['c'].info.format == '%6s'
        assert out['c'].info.description == 't2_c'

    def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
        assert (self.t1 == table.vstack(self.t1)).all()
        assert (self.t1 == table.vstack([self.t1])).all()
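
    # Column-attribute merging, as checked in the two tests above (my
    # paraphrase of the expected values): 'unit' ends up with the last
    # value, warning on each conflict; 'format' and 'description' take the
    # first non-empty value; and per-column 'meta' is merged with the same
    # rules as table metadata.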

    def test_mixin_functionality(self, mixin_cols):
        col = mixin_cols['m']
        len_col = len(col)
        t = table.QTable([col], names=['a'])
        cls_name = type(col).__name__

        # Vstack works for these classes:
        if isinstance(col, (u.Quantity, Time, TimeDelta, SkyCoord,
                            EarthLocation, BaseRepresentationOrDifferential)):
            out = table.vstack([t, t])
            assert len(out) == len_col * 2
            if cls_name == 'SkyCoord':
                # Argh, SkyCoord needs __eq__!!
                assert skycoord_equal(out['a'][len_col:], col)
                assert skycoord_equal(out['a'][:len_col], col)
            elif 'Repr' in cls_name or 'Diff' in cls_name:
                assert np.all(representation_equal(out['a'][:len_col], col))
                assert np.all(representation_equal(out['a'][len_col:], col))
            else:
                assert np.all(out['a'][:len_col] == col)
                assert np.all(out['a'][len_col:] == col)
        else:
            with pytest.raises(NotImplementedError) as err:
                table.vstack([t, t])
            assert ('vstack unavailable for mixin column type(s): {}'
                    .format(cls_name) in str(err.value))

        # Check for outer stack which requires masking.  Only Time, TimeDelta
        # and Quantity support this currently.
        t2 = table.QTable([col], names=['b'])  # different from col name for t
        if isinstance(col, (Time, TimeDelta, Quantity)):
            out = table.vstack([t, t2], join_type='outer')
            assert len(out) == len_col * 2
            assert np.all(out['a'][:len_col] == col)
            assert np.all(out['b'][len_col:] == col)
            assert check_mask(out['a'], [False] * len_col + [True] * len_col)
            assert check_mask(out['b'], [True] * len_col + [False] * len_col)
            # check directly stacking mixin columns:
            out2 = table.vstack([t, t2['b']])
            assert np.all(out['a'] == out2['a'])
            assert np.all(out['b'] == out2['b'])
        else:
            with pytest.raises(NotImplementedError) as err:
                table.vstack([t, t2], join_type='outer')
            assert ('vstack requires masking' in str(err.value)
                    or 'vstack unavailable' in str(err.value))

    def test_vstack_different_representation(self):
        """Test that representations can be mixed together."""
        rep1 = CartesianRepresentation([1, 2]*u.km, [3, 4]*u.km, 1*u.km)
        rep2 = SphericalRepresentation([0]*u.deg, [0]*u.deg, 10*u.km)
        t1 = Table([rep1])
        t2 = Table([rep2])
        t12 = table.vstack([t1, t2])
        expected = CartesianRepresentation([1, 2, 10]*u.km,
                                           [3, 4, 0]*u.km,
                                           [1, 1, 0]*u.km)
        assert np.all(representation_equal(t12['col0'], expected))

        rep3 = UnitSphericalRepresentation([0]*u.deg, [0]*u.deg)
        t3 = Table([rep3])
        with pytest.raises(ValueError, match='representations are inconsistent'):
            table.vstack([t1, t3])


class TestDStack():

    def _setup(self, t_cls=Table):
        self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii')

        self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii')

        self.t2['d'] = Time([1, 2], format='cxcsec')

        self.t3 = t_cls({'a': [[5., 6.], [4., 3.]],
                         'b': [['foo', 'bar'], ['pez', 'sez']]},
                        names=('a', 'b'))

        self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)

        self.t5 = t_cls({'a': [[4., 2.], [1., 6.]],
                         'b': [['foo', 'pez'], ['bar', 'sez']]},
                        names=('a', 'b'))

        self.t6 = t_cls.read([' a b c', ' 7. pez 2', ' 4. sez 6', ' 6. foo 3'],
                             format='ascii')

    def test_validate_join_type(self):
        self._setup()
        with pytest.raises(TypeError, match='Did you accidentally call dstack'):
            table.dstack(self.t1, self.t2)

    @staticmethod
    def compare_dstack(tables, out):
        for ii, tbl in enumerate(tables):
            for name, out_col in out.columns.items():
                if name in tbl.colnames:
                    # Columns always compare equal
                    assert np.all(tbl[name] == out[name][:, ii])

                    # If input has a mask then output must have same mask
                    if hasattr(tbl[name], 'mask'):
                        assert np.all(tbl[name].mask == out[name].mask[:, ii])

                    # If input has no mask then output might have a mask (if
                    # other table is missing that column).  If so then all
                    # mask values should be False.
                    elif hasattr(out[name], 'mask'):
                        assert not np.any(out[name].mask[:, ii])
                else:
                    # Column missing for this table, out must have a mask
                    # with all True.
                    assert np.all(out[name].mask[:, ii])
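
    # Reading guide for compare_dstack above: dstack stacks each common
    # column along a new trailing axis, so out[name][:, ii] holds the values
    # contributed by input table ii, and entries for tables that lack that
    # column are masked.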

    def test_dstack_table_column(self, operation_table_type):
        """Stack a table with 3 cols and one column (gets auto-converted to Table)."""
        self._setup(operation_table_type)
        t2 = self.t1.copy()
        out = table.dstack([self.t1, t2['a']])
        self.compare_dstack([self.t1, t2[('a',)]], out)

    def test_dstack_basic_outer(self, operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4
        t4['a'].mask[0] = True
        # Test for non-masked table
        t12 = table.dstack([t1, t2], join_type='outer')
        assert type(t12) is operation_table_type
        assert type(t12['a']) is type(t1['a'])  # noqa
        assert type(t12['b']) is type(t1['b'])  # noqa
        self.compare_dstack([t1, t2], t12)

        # Test for masked table
        t124 = table.dstack([t1, t2, t4], join_type='outer')
        assert type(t124) is operation_table_type
        assert type(t124['a']) is type(t4['a'])  # noqa
        assert type(t124['b']) is type(t4['b'])  # noqa
        self.compare_dstack([t1, t2, t4], t124)

    def test_dstack_basic_inner(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        # Test for masked table
        t124 = table.dstack([t1, t2, t4], join_type='inner')
        assert type(t124) is operation_table_type
        assert type(t124['a']) is type(t4['a'])  # noqa
        assert type(t124['b']) is type(t4['b'])  # noqa
        self.compare_dstack([t1, t2, t4], t124)

    def test_dstack_multi_dimension_column(self, operation_table_type):
        self._setup(operation_table_type)
        t3 = self.t3
        t5 = self.t5
        t2 = self.t2
        t35 = table.dstack([t3, t5])
        assert type(t35) is operation_table_type
        assert type(t35['a']) is type(t3['a'])  # noqa
        assert type(t35['b']) is type(t3['b'])  # noqa
        self.compare_dstack([t3, t5], t35)

        with pytest.raises(TableMergeError):
            table.dstack([t2, t3])

    def test_dstack_different_length_table(self, operation_table_type):
        self._setup(operation_table_type)
        t2 = self.t2
        t6 = self.t6
        with pytest.raises(ValueError):
            table.dstack([t2, t6])

    def test_dstack_single_table(self):
        self._setup(Table)
        out = table.dstack(self.t1)
        assert np.all(out == self.t1)

    def test_dstack_representation(self):
        rep1 = SphericalRepresentation([1, 2]*u.deg, [3, 4]*u.deg, 1*u.kpc)
        rep2 = SphericalRepresentation([10, 20]*u.deg, [30, 40]*u.deg, 10*u.kpc)
        t1 = Table([rep1])
        t2 = Table([rep2])
        t12 = table.dstack([t1, t2])
        assert np.all(representation_equal(t12['col0'][:, 0], rep1))
        assert np.all(representation_equal(t12['col0'][:, 1], rep2))

    def test_dstack_skycoord(self):
        sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
        sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
        t1 = Table([sc1])
        t2 = Table([sc2])
        t12 = table.dstack([t1, t2])
        assert skycoord_equal(sc1, t12['col0'][:, 0])
        assert skycoord_equal(sc2, t12['col0'][:, 1])
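

# Note on hstack column naming (my summary of the behaviour tested below):
# duplicate column names are de-duplicated via uniq_col_name, which defaults
# to '{col_name}_{table_name}' with table_names defaulting to '1', '2', ...,
# producing names like 'a_1' and 'a_2'.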


class TestHStack():

    def _setup(self, t_cls=Table):
        self.t1 = t_cls.read([' a b', ' 0. foo', ' 1. bar'], format='ascii')

        self.t2 = t_cls.read([' a b c', ' 2. pez 4', ' 3. sez 5'], format='ascii')

        self.t3 = t_cls.read([' d e', ' 4. 7', ' 5. 8', ' 6. 9'], format='ascii')

        self.t4 = t_cls(self.t1, copy=True, masked=True)
        self.t4['a'].name = 'f'
        self.t4['b'].name = 'g'

        # The following table has meta-data that conflicts with t1
        self.t5 = t_cls(self.t1, copy=True)

        self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
        self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
        self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
                                       ('c', {'a': 1, 'b': 1, 'c': 1}),
                                       ('d', 1),
                                       ('a', 1),
                                       ('e', 1)])

    def test_validate_join_type(self):
        self._setup()
        with pytest.raises(TypeError, match='Did you accidentally call hstack'):
            table.hstack(self.t1, self.t2)

    def test_stack_same_table(self, operation_table_type):
        """
        From #2995, test that hstack'ing references to the same table has the
        expected output.
        """
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t1])
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---',
                                 '0.0 foo 0.0 foo', '1.0 bar 1.0 bar']

    def test_stack_rows(self, operation_table_type):
        self._setup(operation_table_type)
        out = table.hstack([self.t1[0], self.t2[1]])
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---',
                                 '0.0 foo 3.0 sez 5']

    def test_stack_columns(self, operation_table_type):
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2['c']])
        assert type(out['a']) is type(self.t1['a'])  # noqa
        assert type(out['b']) is type(self.t1['b'])  # noqa
        assert type(out['c']) is type(self.t2['c'])  # noqa
        assert out.pformat() == [' a b c ', '--- --- ---', '0.0 foo 4', '1.0 bar 5']

    def test_table_meta_merge(self, operation_table_type):
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
        assert out.meta == self.meta_merge

    def test_table_meta_merge_conflict(self, operation_table_type):
        self._setup(operation_table_type)

        with pytest.warns(metadata.MergeConflictWarning) as w:
            out = table.hstack([self.t1, self.t5], join_type='inner')
        assert len(w) == 2
        assert out.meta == self.t5.meta

        with pytest.warns(metadata.MergeConflictWarning) as w:
            out = table.hstack([self.t1, self.t5], join_type='inner',
                               metadata_conflicts='warn')
        assert len(w) == 2
        assert out.meta == self.t5.meta

        out = table.hstack([self.t1, self.t5], join_type='inner',
                           metadata_conflicts='silent')
        assert out.meta == self.t5.meta

        with pytest.raises(MergeConflictError):
            out = table.hstack([self.t1, self.t5], join_type='inner',
                               metadata_conflicts='error')

        with pytest.raises(ValueError):
            out = table.hstack([self.t1, self.t5], join_type='inner',
                               metadata_conflicts='nonsense')

    def test_bad_input_type(self, operation_table_type):
        self._setup(operation_table_type)
        with pytest.raises(ValueError):
            table.hstack([])
        with pytest.raises(TypeError):
            table.hstack(1)
        with pytest.raises(TypeError):
            table.hstack([self.t2, 1])
        with pytest.raises(ValueError):
            table.hstack([self.t1, self.t2], join_type='invalid join type')
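
    # For hstack (as the tests below show): join_type='inner' truncates all
    # inputs to the shortest row count, 'outer' pads shorter tables with
    # masked rows, and 'exact' requires every input to have the same number
    # of rows.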

    def test_stack_basic(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t3 = self.t3
        t4 = self.t4

        out = table.hstack([t1, t2], join_type='inner')
        assert out.masked is False
        assert type(out) is operation_table_type
        assert type(out['a_1']) is type(t1['a'])  # noqa
        assert type(out['b_1']) is type(t1['b'])  # noqa
        assert type(out['a_2']) is type(t2['a'])  # noqa
        assert type(out['b_2']) is type(t2['b'])  # noqa
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c ', '--- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4', '1.0 bar 3.0 sez 5']

        # stacking as a list gives same result
        out_list = table.hstack([t1, t2], join_type='inner')
        assert out.pformat() == out_list.pformat()

        out = table.hstack([t1, t2], join_type='outer')
        assert out.pformat() == out_list.pformat()

        out = table.hstack([t1, t2, t3, t4], join_type='outer')
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
                                 '--- --- --- --- --- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
                                 '1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
                                 ' -- -- -- -- -- 6.0 9 -- --']

        out = table.hstack([t1, t2, t3, t4], join_type='inner')
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
                                 '--- --- --- --- --- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
                                 '1.0 bar 3.0 sez 5 5.0 8 1.0 bar']

    def test_stack_incompatible(self, operation_table_type):
        self._setup(operation_table_type)
        # For join_type exact, which will fail here because n_rows
        # does not match
        with pytest.raises(TableMergeError):
            table.hstack([self.t1, self.t3], join_type='exact')

    def test_hstack_one_masked(self, operation_table_type):
        if operation_table_type is QTable:
            pytest.xfail()
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = operation_table_type(t1, copy=True, masked=True)
        t2.meta.clear()
        t2['b'].mask[1] = True
        out = table.hstack([t1, t2])
        assert out.pformat() == ['a_1 b_1 a_2 b_2', '--- --- --- ---',
                                 '0.0 foo 0.0 foo', '1.0 bar 1.0 --']

    def test_table_col_rename(self, operation_table_type):
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2], join_type='inner',
                           uniq_col_name='{table_name}_{col_name}',
                           table_names=('left', 'right'))
        assert out.masked is False
        assert out.pformat() == ['left_a left_b right_a right_b c ',
                                 '------ ------ ------- ------- ---',
                                 ' 0.0 foo 2.0 pez 4',
                                 ' 1.0 bar 3.0 sez 5']

    def test_col_meta_merge(self, operation_table_type):
        self._setup(operation_table_type)
        t1 = self.t1
        t3 = self.t3[:2]
        t4 = self.t4

        # Just set a bunch of meta and make sure it is the same in output
        meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
        t1['a'].unit = 'cm'
        t1['b'].info.description = 't1_b'
        t4['f'].info.format = '%6s'
        t1['b'].info.meta.update(meta1)
        t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
        t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t3['d'].unit = 'm'
        t3['d'].info.format = '%6s'
        t3['d'].info.description = 't3_c'

        out = table.hstack([t1, t3, t4], join_type='exact')

        for t in [t1, t3, t4]:
            for name in t.colnames:
                for attr in ('meta', 'unit', 'format', 'description'):
                    assert getattr(out[name].info, attr) == getattr(t[name].info, attr)

        # Make sure we got a copy of meta, not ref
        t1['b'].info.meta['b'] = None
        assert out['b'].info.meta['b'] == [1, 2]

    def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
        assert (self.t1 == table.hstack(self.t1)).all()
        assert (self.t1 == table.hstack([self.t1])).all()
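
    # Unlike vstack, hstack never merges two input columns into one (the
    # renamed duplicates stay separate), so unit, format, description and
    # meta simply pass through to the output; test_col_meta_merge above also
    # checks that meta is deep-copied rather than shared by reference.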

    def test_mixin_functionality(self, mixin_cols):
        col1 = mixin_cols['m']
        col2 = col1[2:4]  # Shorter version of col1
        t1 = table.QTable([col1])
        t2 = table.QTable([col2])

        cls_name = type(col1).__name__

        out = table.hstack([t1, t2], join_type='inner')
        assert type(out['col0_1']) is type(out['col0_2'])  # noqa
        assert len(out) == len(col2)

        # Check that columns are as expected.
        if cls_name == 'SkyCoord':
            assert skycoord_equal(out['col0_1'], col1[:len(col2)])
            assert skycoord_equal(out['col0_2'], col2)
        elif 'Repr' in cls_name or 'Diff' in cls_name:
            assert np.all(representation_equal(out['col0_1'], col1[:len(col2)]))
            assert np.all(representation_equal(out['col0_2'], col2))
        else:
            assert np.all(out['col0_1'] == col1[:len(col2)])
            assert np.all(out['col0_2'] == col2)

        # Time, TimeDelta and Quantity support masking; the other mixins do not
        if isinstance(col1, (Time, TimeDelta, Quantity)):
            out = table.hstack([t1, t2], join_type='outer')
            assert len(out) == len(t1)
            assert np.all(out['col0_1'] == col1)
            assert np.all(out['col0_2'][:len(col2)] == col2)
            assert check_mask(out['col0_2'], [False, False, True, True])

            # check directly stacking mixin columns:
            out2 = table.hstack([t1, t2['col0']], join_type='outer')
            assert np.all(out['col0_1'] == out2['col0_1'])
            assert np.all(out['col0_2'] == out2['col0_2'])
        else:
            with pytest.raises(NotImplementedError) as err:
                table.hstack([t1, t2], join_type='outer')
            assert 'hstack requires masking' in str(err.value)
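

# The keep argument of table.unique (exercised below) chooses which of the
# duplicated rows survives: 'first' (the default) keeps the first
# occurrence, 'last' keeps the final one, and 'none' drops every row whose
# key occurs more than once.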


def test_unique(operation_table_type):
    t = operation_table_type.read(
        [' a b c d',
         ' 2 b 7.0 0',
         ' 1 c 3.0 5',
         ' 2 b 6.0 2',
         ' 2 a 4.0 3',
         ' 1 a 1.0 7',
         ' 2 b 5.0 1',
         ' 0 a 0.0 4',
         ' 1 a 2.0 6',
         ' 1 c 3.0 5',
         ], format='ascii')

    tu = operation_table_type(np.sort(t[:-1]))

    t_all = table.unique(t)
    assert sort_eq(t_all.pformat(), tu.pformat())
    t_s = t.copy()
    del t_s['b', 'c', 'd']
    t_all = table.unique(t_s)
    assert sort_eq(t_all.pformat(), [' a ', '---', ' 0', ' 1', ' 2'])

    key1 = 'a'
    t1a = table.unique(t, key1)
    assert sort_eq(t1a.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 7.0 0'])
    t1b = table.unique(t, key1, keep='last')
    assert sort_eq(t1b.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4', ' 1 c 3.0 5', ' 2 b 5.0 1'])
    t1c = table.unique(t, key1, keep='none')
    assert sort_eq(t1c.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4'])

    key2 = ['a', 'b']
    t2a = table.unique(t, key2)
    assert sort_eq(t2a.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4', ' 1 a 1.0 7', ' 1 c 3.0 5',
                                   ' 2 a 4.0 3', ' 2 b 7.0 0'])
    t2b = table.unique(t, key2, keep='last')
    assert sort_eq(t2b.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4', ' 1 a 2.0 6', ' 1 c 3.0 5',
                                   ' 2 a 4.0 3', ' 2 b 5.0 1'])
    t2c = table.unique(t, key2, keep='none')
    assert sort_eq(t2c.pformat(), [' a b c d ', '--- --- --- ---',
                                   ' 0 a 0.0 4', ' 2 a 4.0 3'])

    key2 = ['a', 'a']
    with pytest.raises(ValueError) as exc:
        t2a = table.unique(t, key2)
    assert exc.value.args[0] == "duplicate key names"

    with pytest.raises(ValueError) as exc:
        table.unique(t, key2, keep=True)
    assert exc.value.args[0] == "'keep' should be one of 'first', 'last', 'none'"

    t1_m = operation_table_type(t1a, masked=True)
    t1_m['a'].mask[1] = True

    with pytest.raises(ValueError) as exc:
        t1_mu = table.unique(t1_m)
    assert exc.value.args[0] == ("cannot use columns with masked values as keys; "
                                 "remove column 'a' from keys and rerun unique()")

    t1_mu = table.unique(t1_m, silent=True)
    assert t1_mu.masked is False
    assert t1_mu.pformat() == [' a b c d ', '--- --- --- ---',
                               ' 0 a 0.0 4', ' 2 b 7.0 0', ' -- c 3.0 5']

    with pytest.raises(ValueError):
        t1_mu = table.unique(t1_m, silent=True, keys='a')

    t1_m = operation_table_type(t, masked=True)
    t1_m['a'].mask[1] = True
    t1_m['d'].mask[3] = True

    # Test that multiple masked key columns get removed in the correct
    # order
    t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
    assert t1_mu.masked is False
    assert t1_mu.pformat() == [' a b c d ', '--- --- --- ---',
                               ' 2 a 4.0 --', ' 2 b 7.0 0', ' -- c 3.0 5']


def test_vstack_bytes(operation_table_type):
    """
    Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy/#8403.
    """
    t = operation_table_type([[b'a']], names=['a'])
    assert t['a'].itemsize == 1

    t2 = table.vstack([t, t])
    assert len(t2) == 2
    assert t2['a'].itemsize == 1


def test_vstack_unicode():
    """
    Test for problem related to issue #5617 when vstack'ing *unicode*
    columns.  In this case the character size gets multiplied by 4.
    """
    t = table.Table([['a']], names=['a'])
    assert t['a'].itemsize == 4  # 4-byte / char for U dtype

    t2 = table.vstack([t, t])
    assert len(t2) == 2
    assert t2['a'].itemsize == 4


def test_join_mixins_time_quantity():
    """
    Test for table join using non-ndarray key columns.
    """
    tm1 = Time([2, 1, 2], format='cxcsec')
    q1 = [2, 1, 1] * u.m
    idx1 = [1, 2, 3]
    tm2 = Time([2, 3], format='cxcsec')
    q2 = [2, 3] * u.m
    idx2 = [10, 20]
    t1 = Table([tm1, q1, idx1], names=['tm', 'q', 'idx'])
    t2 = Table([tm2, q2, idx2], names=['tm', 'q', 'idx'])
    # Output:
    #
    # <Table length=4>
    #          tm            q   idx_1 idx_2
    #                        m
    #        object      float64 int64 int64
    # ------------------ ------- ----- -----
    # 0.9999999999969589     1.0     2    --
    #   2.00000000000351     1.0     3    --
    #   2.00000000000351     2.0     1    10
    #  3.000000000000469     3.0    --    20

    t12 = table.join(t1, t2, join_type='outer', keys=['tm', 'q'])
    # Key cols are lexically sorted
    assert np.all(t12['tm'] == Time([1, 2, 2, 3], format='cxcsec'))
    assert np.all(t12['q'] == [1, 1, 2, 3] * u.m)
    assert np.all(t12['idx_1'] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
    assert np.all(t12['idx_2'] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))


def test_join_mixins_not_sortable():
    """
    Test for table join using non-ndarray key columns that are not sortable.
    """
    sc = SkyCoord([1, 2], [3, 4], unit='deg,deg')
    t1 = Table([sc, [1, 2]], names=['sc', 'idx1'])
    t2 = Table([sc, [10, 20]], names=['sc', 'idx2'])

    with pytest.raises(TypeError, match='one or more key columns are not sortable'):
        table.join(t1, t2, keys='sc')


def test_join_non_1d_key_column():
    c1 = [[1, 2], [3, 4]]
    c2 = [1, 2]
    t1 = Table([c1, c2], names=['a', 'b'])
    t2 = t1.copy()
    with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
        table.join(t1, t2, keys='a')


def test_argsort_time_column():
    """Regression test for #10823."""
    times = Time(['2016-01-01', '2018-01-01', '2017-01-01'])
    t = Table([times], names=['time'])
    i = t.argsort('time')
    assert np.all(i == times.argsort())
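

# A minimal sketch of the indexed-sort behaviour tested below (uses only the
# public Table API exercised elsewhere in this file):
#
#     t = Table([[1, 3, 2]], names=['a'])
#     t.add_index('a')
#     t.sort('a')   # the index must stay consistent with the sorted data
#     t.loc[2]      # index-based row lookup still works afterwards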


def test_sort_indexed_table():
    """Test fix for #9473 and #6545 - and another regression test for #10823."""
    t = Table([[1, 3, 2], [6, 4, 5]], names=('a', 'b'))
    t.add_index('a')
    t.sort('a')
    assert np.all(t['a'] == [1, 2, 3])
    assert np.all(t['b'] == [6, 5, 4])
    t.sort('b')
    assert np.all(t['b'] == [4, 5, 6])
    assert np.all(t['a'] == [3, 2, 1])

    times = ['2016-01-01', '2018-01-01', '2017-01-01']
    tm = Time(times)
    t2 = Table([tm, [3, 2, 1]], names=['time', 'flux'])
    t2.sort('flux')
    assert np.all(t2['flux'] == [1, 2, 3])
    t2.sort('time')
    assert np.all(t2['flux'] == [3, 1, 2])
    assert np.all(t2['time'] == tm[[0, 2, 1]])

    # Using the table as a TimeSeries implicitly sets the index, so
    # this test is a bit different from the above.
    from astropy.timeseries import TimeSeries
    ts = TimeSeries(time=times)
    ts['flux'] = [3, 2, 1]
    ts.sort('flux')
    assert np.all(ts['flux'] == [1, 2, 3])
    ts.sort('time')
    assert np.all(ts['flux'] == [3, 1, 2])
    assert np.all(ts['time'] == tm[[0, 2, 1]])


def test_get_out_class():
    c = table.Column([1, 2])
    mc = table.MaskedColumn([1, 2])
    q = [1, 2] * u.m

    assert _get_out_class([c, mc]) is mc.__class__
    assert _get_out_class([mc, c]) is mc.__class__
    assert _get_out_class([c, c]) is c.__class__
    assert _get_out_class([c]) is c.__class__

    with pytest.raises(ValueError):
        _get_out_class([c, q])

    with pytest.raises(ValueError):
        _get_out_class([q, c])


def test_masking_required_exception():
    """
    Test that outer join, hstack and vstack fail for a mixin column which
    does not support masking.
    """
    col = table.NdarrayMixin([0, 1, 2, 3])
    t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])
    t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])

    with pytest.raises(NotImplementedError) as err:
        table.vstack([t1, t2], join_type='outer')
    assert 'vstack unavailable' in str(err.value)

    with pytest.raises(NotImplementedError) as err:
        table.hstack([t1, t2], join_type='outer')
    assert 'hstack requires masking' in str(err.value)

    with pytest.raises(NotImplementedError) as err:
        table.join(t1, t2, join_type='outer')
    assert 'join requires masking' in str(err.value)


def test_stack_columns():
    c = table.Column([1, 2])
    mc = table.MaskedColumn([1, 2])
    q = [1, 2] * u.m
    time = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
    sc = SkyCoord([1, 2], [3, 4], unit='deg')
    cq = table.Column([11, 22], unit=u.m)

    t = table.hstack([c, q])
    assert t.__class__ is table.QTable
    assert t.masked is False

    t = table.hstack([q, c])
    assert t.__class__ is table.QTable
    assert t.masked is False

    t = table.hstack([mc, q])
    assert t.__class__ is table.QTable
    assert t.masked is False

    t = table.hstack([c, mc])
    assert t.__class__ is table.Table
    assert t.masked is False

    t = table.vstack([q, q])
    assert t.__class__ is table.QTable

    t = table.vstack([c, c])
    assert t.__class__ is table.Table

    t = table.hstack([c, time])
    assert t.__class__ is table.Table
    t = table.hstack([c, sc])
    assert t.__class__ is table.Table
    t = table.hstack([q, time, sc])
    assert t.__class__ is table.QTable

    with pytest.raises(ValueError):
        table.vstack([c, q])

    with pytest.raises(ValueError):
        t = table.vstack([q, cq])


def test_mixin_join_regression():
    # This used to trigger a ValueError:
    # ValueError: NumPy boolean array indexing assignment cannot assign
    # 6 input values to the 4 output values where the mask is true
    t1 = QTable()
    t1['index'] = [1, 2, 3, 4, 5]
    t1['flux1'] = [2, 3, 2, 1, 1] * u.Jy
    t1['flux2'] = [2, 3, 2, 1, 1] * u.Jy

    t2 = QTable()
    t2['index'] = [3, 4, 5, 6]
    t2['flux1'] = [2, 1, 1, 3] * u.Jy
    t2['flux2'] = [2, 1, 1, 3] * u.Jy

    t12 = table.join(t1, t2, keys=('index', 'flux1', 'flux2'), join_type='outer')

    assert len(t12) == 6