# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import warnings
from collections import defaultdict, OrderedDict
import numpy as np
from . import Header, Card
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, MaskedColumn
from astropy.table.column import col_copy
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.utils.exceptions import AstropyUserWarning
# The following is based on the FITS WCS Paper IV, "Representations of time
# coordinates in FITS".
# https://ui.adsabs.harvard.edu/abs/2015A%26A...574A..36R
# FITS WCS standard specified "4-3" form for non-linear coordinate types
TCTYP_RE_TYPE = re.compile(r'(?P<type>[A-Z]+)[-]+')
TCTYP_RE_ALGO = re.compile(r'(?P<algo>[A-Z]+)\s*')
# FITS Time standard specified time units
FITS_TIME_UNIT = ['s', 'd', 'a', 'cy', 'min', 'h', 'yr', 'ta', 'Ba']
# Global time reference coordinate keywords
TIME_KEYWORDS = ('TIMESYS', 'MJDREF', 'JDREF', 'DATEREF',
'TREFPOS', 'TREFDIR', 'TIMEUNIT', 'TIMEOFFS',
'OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z',
'OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H', 'DATE',
'DATE-OBS', 'DATE-AVG', 'DATE-BEG', 'DATE-END',
'MJD-OBS', 'MJD-AVG', 'MJD-BEG', 'MJD-END')
# Column-specific time override keywords
COLUMN_TIME_KEYWORDS = ('TCTYP', 'TCUNI', 'TRPOS')
# Column-specific keywords regex
COLUMN_TIME_KEYWORD_REGEXP = f"({'|'.join(COLUMN_TIME_KEYWORDS)})[0-9]+"
def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None
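# Illustrative sketch (not part of the module API): the regexp above matches
# indexed column time keywords and nothing else, e.g.
#     >>> is_time_column_keyword('TCTYP2')
#     True
#     >>> is_time_column_keyword('TTYPE2')
#     False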
# Set astropy time global information
GLOBAL_TIME_INFO = {'TIMESYS': ('UTC', 'Default time scale'),
'JDREF': (0.0, 'Time columns are jd = jd1 + jd2'),
'TREFPOS': ('TOPOCENTER', 'Time reference position')}
def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info['scale'] = FITS_DEPRECATED_SCALES.get(global_info['TIMESYS'],
global_info['TIMESYS'].lower())
# Verify global time scale
if global_info['scale'] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info['scale'] == 'gps':
warnings.warn(
'Global time scale (TIMESYS) has a FITS recognized time scale '
'value "GPS". In Astropy, "GPS" is a time from epoch format '
'which runs synchronously with TAI; GPS is approximately 19 s '
'ahead of TAI. Hence, this format will be used.', AstropyUserWarning)
# Assume that the values are in GPS format
global_info['scale'] = 'tai'
global_info['format'] = 'gps'
if global_info['scale'] == 'local':
warnings.warn(
'Global time scale (TIMESYS) has a FITS recognized time scale '
'value "LOCAL". However, the standard states that "LOCAL" should be '
'tied to one of the existing scales because it is intrinsically '
'unreliable and/or ill-defined. Astropy will thus use the default '
'global time scale "UTC" instead of "LOCAL".', AstropyUserWarning)
# Default scale 'UTC'
global_info['scale'] = 'utc'
global_info['format'] = None
else:
raise AssertionError(
'Global time scale (TIMESYS) should have a FITS recognized '
'time scale value (got {!r}). The FITS standard states that '
'the use of local time scales should be restricted to alternate '
'coordinates.'.format(global_info['TIMESYS']))
else:
# Scale is already set
global_info['format'] = None
# Check if geocentric global location is specified
obs_geo = [global_info[attr] for attr in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z')
if attr in global_info]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info['location'] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
'The geocentric observatory location {} is not completely '
'specified (X, Y, Z) and will be ignored.'.format(obs_geo),
AstropyUserWarning)
# Check geodetic location
obs_geo = [global_info[attr] for attr in ('OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H')
if attr in global_info]
if len(obs_geo) == 3:
global_info['location'] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
'The geodetic observatory location {} is not completely '
'specified (lon, lat, alt) and will be ignored.'.format(obs_geo),
AstropyUserWarning)
global_info['location'] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (('MJDREF', 'mjd'), ('JDREF', 'jd'), ('DATEREF', 'fits')):
if key in global_info:
global_info['ref_time'] = {'val': global_info[key], 'format': format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info['ref_time'] = {'val': 0, 'format': 'mjd'}
def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get('TCTYP', None)
unit = column_info.get('TCUNI', None)
location = column_info.get('TRPOS', None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info['scale'] = scale.lower()
column_info['format'] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info['scale'] = FITS_DEPRECATED_SCALES[scale]
column_info['format'] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == 'TIME':
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
elif scale == 'GPS':
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "GPS". '
'In Astropy, "GPS" is a time from epoch format which runs '
'synchronously with TAI; GPS runs ahead of TAI approximately '
'by 19 s. Hence, this format will be used.'.format(column_info),
AstropyUserWarning)
column_info['scale'] = 'tai'
column_info['format'] = 'gps'
elif scale == 'LOCAL':
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "LOCAL". '
'However, the standard states that "LOCAL" should be tied to one '
'of the existing scales because it is intrinsically unreliable '
'and/or ill-defined. Astropy will thus use the global time scale '
'(TIMESYS) as the default.'.format(column_info),
AstropyUserWarning)
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == 'TOPOCENTER':
column_info['location'] = global_info['location']
if column_info['location'] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
'However, the observatory position is not properly specified. '
'The FITS standard does not support this and hence reference '
'position will be ignored.', AstropyUserWarning)
else:
column_info['location'] = None
# Warn user about ignoring global reference position when TRPOSn is
# not specified
elif global_info['TREFPOS'] == 'TOPOCENTER':
if global_info['location'] is not None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", and the observatory position '
'has been specified. However, for supporting column-specific location, '
'reference position will be ignored for this column.',
AstropyUserWarning)
column_info['location'] = None
else:
column_info['location'] = None
# Get reference time
column_info['ref_time'] = global_info['ref_time']
return True
def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == 'TIME' and col.info.unit in FITS_TIME_UNIT:
column_info = {'scale': global_info['scale'],
'format': global_info['format'],
'ref_time': global_info['ref_time'],
'location': None}
if global_info['TREFPOS'] == 'TOPOCENTER':
column_info['location'] = global_info['location']
if column_info['location'] is None:
warnings.warn(
'Time column "{}" reference position will be ignored '
'due to unspecified observatory position.'.format(col.info.name),
AstropyUserWarning)
return column_info
return None
def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key, value in global_info.items():
# FITS uses a subset of ISO-8601 for DATE-xxx
if key not in table.meta:
try:
table.meta[key] = _convert_time_key(global_info, key)
except ValueError:
pass
def _convert_time_key(global_info, key):
"""
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword.
"""
value = global_info[key]
if key.startswith('DATE'):
scale = 'utc' if key == 'DATE' else global_info['scale']
precision = len(value.split('.')[-1]) if '.' in value else 0
return Time(value, format='fits', scale=scale,
precision=precision)
# MJD-xxx in MJD according to TIMESYS
elif key.startswith('MJD-'):
return Time(value, format='mjd',
scale=global_info['scale'])
else:
raise ValueError('Key is not a valid global time keyword')
def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ['S', 'U']:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(col, format='fits', scale=column_info['scale'],
precision=precision,
location=column_info['location'])
if column_info['format'] == 'gps':
return Time(col, format='gps', location=column_info['location'])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if (column_info['ref_time']['val'] == 0 and
column_info['ref_time']['format'] in ['jd', 'mjd']):
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(col[..., 0], col[..., 1], scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
else:
return Time(col, scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
# Reference time
ref_time = Time(column_info['ref_time']['val'], scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
'The exception "{}" was encountered while trying to convert the time '
'column "{}" to Astropy Time.'.format(err, col.info.name),
AstropyUserWarning)
return col
def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {'TIMESYS': 'UTC',
'TREFPOS': 'TOPOCENTER'}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
# may get modified. The data is still a "view" (for now).
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r'([A-Z]+)([0-9]+)', key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif (value in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z') and
re.match('TTYPE[0-9]+', key)):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname],
column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy
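# Illustrative sketch of how ``fits_to_time`` is used (hypothetical ``hdu``);
# in practice it is invoked by the astropy Table FITS reader rather than
# called directly:
#     >>> # tbl = Table(hdu.data)                 # build a Table from a BinTableHDU
#     >>> # hdr = fits_to_time(hdu.header, tbl)   # time columns converted to Time,
#     >>> #                                       # stripped header returned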
def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header([Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()])
# Store coordinate column-specific metadata
newtable.meta['__coordinate_columns__'] = defaultdict(OrderedDict)
coord_meta = newtable.meta['__coordinate_columns__']
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method['fits'] == 'formatted_value':
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
jd12 = np.stack([col.jd1, col.jd2], axis=-1)
# jd1 and jd2 are stacked along a new trailing axis, so each element of the
# resulting column is a (jd1, jd2) pair.
newtable.replace_column(col.info.name, col_cls(jd12, unit='d'))
# Time column-specific override keywords
coord_meta[col.info.name]['coord_type'] = col.scale.upper()
coord_meta[col.info.name]['coord_unit'] = 'd'
# Time column reference position
if getattr(col, 'location') is None:
coord_meta[col.info.name]['time_ref_pos'] = None
if location is not None:
warnings.warn(
'Time Column "{}" has no specified location, but global Time '
'Position is present, which will be the default for this column '
'in FITS specification.'.format(col.info.name),
AstropyUserWarning)
else:
coord_meta[col.info.name]['time_ref_pos'] = 'TOPOCENTER'
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
'Earth Location "TOPOCENTER" for Time Column "{}" is incompatible '
'with scale "{}".'.format(col.info.name, col.scale.upper()),
AstropyUserWarning)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ('x', 'y', 'z'):
newtable.add_column(Column(getattr(location, dim).to_value(u.m)),
name=f'OBSGEO-{dim.upper()}')
else:
hdr.extend([Card(keyword=f'OBSGEO-{dim.upper()}',
value=getattr(location, dim).to_value(u.m))
for dim in ('x', 'y', 'z')])
elif np.any(location != col.location):
raise ValueError('Multiple Time Columns with different geocentric '
'observatory locations ({}, {}) encountered. '
'This is not supported by the FITS standard.'
.format(location, col.location))
return newtable, hdr
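# Illustrative round-trip sketch (hypothetical file name): ``table_to_hdu`` in
# astropy.io.fits.convenience calls ``time_to_fits`` whenever a table holds
# Time columns, so a plain write is enough to exercise the code above:
#     >>> # from astropy.table import Table
#     >>> # from astropy.time import Time
#     >>> # t = Table({'obs_time': Time(['2020-01-01', '2020-01-02'])})
#     >>> # t.write('times.fits')   # Time stored as (jd1, jd2) plus time keywords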
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata`, support
special arguments for selecting which HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the HDU. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
HDU arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an
HDU.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import fitsopen, HDUList
from .hdu.image import PrimaryHDU, ImageHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import (fileobj_closed, fileobj_name, fileobj_mode, _is_int,
_is_dask_array)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto',
'append', 'update', 'info', 'tabledump', 'tableload',
'table_to_hdu', 'printdiff']
def getheader(filename, *args, **kwargs):
"""
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
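# Illustrative usage sketch (hypothetical file name):
#     >>> # from astropy.io import fits
#     >>> # hdr = fits.getheader('image.fits')          # primary HDU header
#     >>> # hdr = fits.getheader('image.fits', ext=2)   # header of extension 2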
def getdata(filename, *args, header=None, lower=None, upper=None, view=None,
**kwargs):
"""
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
following rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and primary header contains no data, ``getdata`` attempts
to retrieve data from first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note ``EXTNAME`` values are not case sensitive
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs.
"""
mode, closed = _get_file_mode(filename)
ext = kwargs.get('ext')
extname = kwargs.get('extname')
extver = kwargs.get('extver')
ext_given = not (len(args) == 0 and ext is None and
extname is None and extver is None)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None:
if ext_given:
raise IndexError(f"No data in HDU #{extidx}.")
# fallback to the first extension HDU
if len(hdulist) == 1:
raise IndexError(
"No data in Primary HDU and no extension HDU found."
)
hdu = hdulist[1]
data = hdu.data
if data is None:
raise IndexError(
"No data in either Primary or first extension HDUs."
)
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller('lower')
elif upper:
trans = operator.methodcaller('upper')
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == '':
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
one of the following rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data=True``
when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
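# Illustrative usage sketch (hypothetical file name and keyword):
#     >>> # naxis = fits.getval('image.fits', 'NAXIS', ext=0)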
def setval(filename, keyword, *args, value=None, comment=None, before=None,
after=None, savecomment=False, **kwargs):
"""
Set a keyword's value from a header in a FITS file.
If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data=True``
when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data=True``
when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
def writeto(filename, data, header=None, output_verify='exception',
overwrite=False, checksum=False):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify,
checksum=checksum)
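# Illustrative usage sketch (hypothetical file name and data):
#     >>> # import numpy as np
#     >>> # fits.writeto('new.fits', np.zeros((10, 10)), overwrite=True)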
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .connect import is_column_keyword, REMOVE_KEYWORDS
from .column import python_to_tdisp
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(f'cannot write table with mixin column(s) '
f'{unsupported_names}')
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (coldtype.subdtype[0].type
if coldtype.subdtype else coldtype.type)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ''
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(tarray.filled(), header=hdr,
character_as_bytes=character_as_bytes)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
int_formats = ('B', 'I', 'J', 'K')
if not (col.format in int_formats or
col.format.p_format in int_formats):
continue
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(tarray, header=hdr,
character_as_bytes=character_as_bytes)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(table[col.name].info.format,
logical_dtype=logical)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format='fits')
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
f"is not recognized by the FITS standard. Either scale "
f"the data or change the units.")
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
f"native FITS format ")
if any('SerializedColumn' in item and 'name: '+col.name in item
for item in table.meta.get('comments', [])):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading.")
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading.")
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format='fits', parse_strict='warn')
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop('__coordinate_columns__', {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in 'coord_type', 'coord_unit':
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get('time_ref_pos', None)
if trpos is not None:
setattr(col, 'time_ref_pos', trpos)
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
f"with a FITS reserved keyword", AstropyUserWarning)
continue
# Convert to FITS format
if key == 'comments':
key = 'comment'
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
f"added to FITS Header - skipping", AstropyUserWarning)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
f"added to FITS Header - skipping", AstropyUserWarning)
return table_hdu
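# Illustrative usage sketch (hypothetical file name): convert an astropy Table
# to a binary table HDU and write it out:
#     >>> # from astropy.table import Table
#     >>> # hdu = fits.table_to_hdu(Table({'a': [1, 2, 3]}))
#     >>> # hdu.writeto('table.fits', overwrite=True)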
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
"""
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
# The input file or file-like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode='append', **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode='append')
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
def update(filename, data, *args, **kwargs):
"""
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop('header', header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, 'update', *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default='readonly')
# Set the default value for the ignore_missing_end parameter
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
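# Illustrative usage sketch (hypothetical file name):
#     >>> # fits.info('image.fits')                        # summary to stdout
#     >>> # rows = fits.info('image.fits', output=False)   # list of tuples instead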
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow a quick printout
of a FITS difference report; it writes to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver']
if key in kwargs}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
# Use the handy _getext to interpret any ext keywords, but
# we will need to close the first HDUList if opening the second fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an "
"HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError("Extension specification with HDUList "
"objects not implemented.")
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1,
overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `tabledump` function is to allow editing in a
standard text editor of the table data and parameters. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default='readonly')
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + '_' + repr(ext) + '.txt'
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
The primary use for the `tableload` function is to allow the input of
table data and parameters from ASCII files that were edited in a standard
text editor. The `tabledump` function can be used to create the
initial ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def _getext(filename, mode, *args, ext=None, extname=None, extver=None,
**kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
err_msg = ('Redundant/conflicting extension argument(s): {}'.format(
{'args': args, 'ext': ext, 'extname': extname,
'extver': extver}))
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError('Too many positional arguments.')
if (ext is not None and
not (_is_int(ext) or
(isinstance(ext, tuple) and len(ext) == 2 and
isinstance(ext[0], str) and _is_int(ext[1])))):
raise ValueError(
'The ext keyword must be either an extension number '
'(zero-indexed) or a (extname, extver) tuple.')
if extname is not None and not isinstance(extname, str):
raise ValueError('The extname argument must be a string.')
if extver is not None and not _is_int(extver):
raise ValueError('The extver argument must be an integer.')
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError('extver alone cannot specify an extension.')
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU._from_data(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if ((isinstance(data, np.ndarray) and data.dtype.fields is not None) or
isinstance(data, np.recarray)):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray) or _is_dask_array(data):
hdu = ImageHDU(data, header=header)
else:
raise KeyError('Data must be a numpy array.')
return hdu
def _stat_filename_or_fileobj(filename):
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ''
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = ((name and
(not os.path.exists(name) or
(os.path.getsize(name) == 0)))
or (not name and loc == 0))
return name, closed, noexist_or_empty
def _get_file_mode(filename, default='readonly'):
"""
Allow a file object to already be opened in any of the valid modes
and leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode))
return mode, closed
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import sys
from collections import defaultdict
from setuptools import Extension
from glob import glob
import numpy
from extension_helpers import pkg_config, get_compiler
def _get_compression_extension():
debug = '--debug' in sys.argv
cfg = defaultdict(list)
cfg['include_dirs'].append(numpy.get_include())
cfg['sources'].append(os.path.join(os.path.dirname(__file__),
'src', 'compressionmodule.c'))
if (int(os.environ.get('ASTROPY_USE_SYSTEM_CFITSIO', 0)) or
int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))):
for k, v in pkg_config(['cfitsio'], ['cfitsio']).items():
cfg[k].extend(v)
else:
if get_compiler() == 'msvc':
# These come from the CFITSIO vcc makefile, except the last
# which ensures on windows we do not include unistd.h (in regular
# compilation of cfitsio, an empty file would be generated)
cfg['extra_compile_args'].extend(
['/D', 'WIN32',
'/D', '_WINDOWS',
'/D', '_MBCS',
'/D', '_USRDLL',
'/D', '_CRT_SECURE_NO_DEPRECATE',
'/D', 'FF_NO_UNISTD_H'])
else:
cfg['extra_compile_args'].extend([
'-Wno-declaration-after-statement'
])
cfg['define_macros'].append(('HAVE_UNISTD_H', None))
if not debug:
# these switches are to silence warnings from compiling CFITSIO
# For full silencing, some are added that only are used in
# later versions of gcc (versions approximate; see #6474)
cfg['extra_compile_args'].extend([
'-Wno-strict-prototypes',
'-Wno-unused',
'-Wno-uninitialized',
'-Wno-unused-result', # gcc >~4.8
'-Wno-misleading-indentation', # gcc >~7.2
'-Wno-format-overflow', # gcc >~7.2
])
cfitsio_lib_path = os.path.join('cextern', 'cfitsio', 'lib')
cfitsio_zlib_path = os.path.join('cextern', 'cfitsio', 'zlib')
cfitsio_files = glob(os.path.join(cfitsio_lib_path, '*.c'))
cfitsio_zlib_files = glob(os.path.join(cfitsio_zlib_path, '*.c'))
cfg['include_dirs'].append(cfitsio_lib_path)
cfg['include_dirs'].append(cfitsio_zlib_path)
cfg['sources'].extend(cfitsio_files)
cfg['sources'].extend(cfitsio_zlib_files)
return Extension('astropy.io.fits.compression', **cfg)
def get_extensions():
return [_get_compression_extension()]
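# Build-time note (assumes a standard setuptools/pip build of astropy): setting
# ASTROPY_USE_SYSTEM_CFITSIO=1 or ASTROPY_USE_SYSTEM_ALL=1 in the environment
# makes the extension link against a system cfitsio located via pkg-config,
# instead of compiling the bundled sources under cextern/cfitsio.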
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f'_update_{notification}'
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
        Exclude listeners when saving the notifier's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (threading.active_count() == 1 and
curr_thread.name == 'MainThread')
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until {} is '
'complete!'.format(func.__name__),
AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define a new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
        # StopIteration if b happens to be empty
break
return zip(a, b)
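# A quick doctest-style sketch of ``pairwise`` (illustrative only):
#
#     >>> list(pairwise('abcd'))
#     [('a', 'b'), ('b', 'c'), ('c', 'd')]
#     >>> list(pairwise([]))
#     []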
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.str_)):
ns = np.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
            ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.bytes_)):
raise TypeError('string operation on non-string array')
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace('\ufffd', '?')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
            # array with the wrong dtype, so build the empty unicode array
            # by hand instead
dt = s.dtype.str.replace('S', 'U')
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
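# A minimal round-trip sketch for the ASCII helpers above (scalar inputs;
# ndarray inputs are converted element-wise, preserving the array type):
#
#     >>> encode_ascii('SIMPLE')
#     b'SIMPLE'
#     >>> decode_ascii(b'BITPIX')
#     'BITPIX'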
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because `open()` returns an `io.BufferedReader` by default.
This is bad, because `io.BufferedReader` doesn't support random access,
which we need in some cases. We must call open with buffering=0 to get
a raw random-access file reader.
"""
return open(filename, mode, buffering=0)
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, 'fileobj_mode'):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, 'mode'):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return 'rb'
elif mode == gzip.WRITE:
return 'wb'
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
return mode
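# Illustrative sketch of the mode normalization; 'scratch.dat' is just a
# placeholder file name. Python reports 'w+b' handles with mode 'rb+', which
# is already the normalized form returned here:
#
#     >>> with open('scratch.dat', 'w+b') as f:
#     ...     fileobj_mode(f)
#     'rb+'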
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
    Like :func:`textwrap.fill`, but preserves existing paragraphs, which
    :func:`textwrap.fill` does not otherwise handle well. Also handles section
    headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
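# A small sketch of how ``fill`` differs from plain ``textwrap.fill``:
# paragraphs whose lines already fit within ``width`` are left untouched:
#
#     >>> text = 'a short header\n\n' + 'word ' * 10
#     >>> fill(text, 20).split('\n\n')[0]
#     'a short header'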
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
Version(platform.mac_ver()[0]) < Version('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because the array constructed over the read-only
        # bytes buffer is itself read-only
array = array.copy()
return array
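# Sketch of the file-like branch above, reading raw bytes from an in-memory
# buffer (illustrative only):
#
#     >>> import io
#     >>> buf = io.BytesIO(bytes([0, 1, 2, 3]))
#     >>> _array_from_file(buf, np.dtype('uint8'), count=4).tolist()
#     [0, 1, 2, 3]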
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase):
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order='C'):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
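# Sketch of writing an array through the file-like path above (illustrative):
#
#     >>> import io
#     >>> buf = io.BytesIO()
#     >>> _array_to_file_like(np.array([1, 2, 3], dtype='uint8'), buf)
#     >>> buf.getvalue()
#     b'\x01\x02\x03'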
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
    elif not binmode and not isinstance(s, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
    Converts an array to a new dtype--if the itemsize of the new dtype is
    the same as the old dtype and the two dtypes are not both numeric, a
    view is returned.  Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
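# Sketch: converting between dtypes of different itemsize always copies, so
# the result is a new array with the requested dtype (illustrative):
#
#     >>> a = np.array([1, 2, 3], dtype='uint8')
#     >>> _convert_array(a, np.dtype('int16')).dtype
#     dtype('int16')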
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == 'i' and dtype.itemsize == 1:
return -128
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_integer(dtype):
return (
(dtype.kind == 'u' and dtype.itemsize >= 2)
or (dtype.kind == 'i' and dtype.itemsize == 1)
)
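# Sketch of the pseudo-integer helpers: unsigned types wider than one byte
# and signed one-byte integers are "pseudo" integers in FITS, and their zero
# point sits at the middle of the type's range (illustrative):
#
#     >>> _is_pseudo_integer(np.dtype('uint16'))
#     True
#     >>> _pseudo_zero(np.dtype('uint16'))
#     32768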
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces.  But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
    the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode('utf8') + b' ', dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
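# Sketch of how ``_words_group`` splits on blanks without breaking words
# (illustrative):
#
#     >>> _words_group('one two three four', 9)
#     ['one two ', 'three ', 'four']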
def _tmp_name(input):
"""
    Create a temporary file name which should not already exist.  Use the
    directory of the input file as the directory for the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ''
if not isinstance(hdulist, list):
hdulist = [hdulist, ]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = ("Not enough space on disk: requested {}, "
"available {}. ".format(hdulist_size, free_space))
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
        # The _str_to_num function converts the value to an int or a float,
        # so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
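# Sketch of the fallback behaviour (illustrative):
#
#     >>> _extract_number('42', default=0)
#     42
#     >>> _extract_number('not-a-number', default=0)
#     0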
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(
f'io/fits/tests/data/{filename}', 'astropy')
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the strings to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
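# Sketch of the in-place strip (the input array itself is modified and then
# returned; illustrative):
#
#     >>> a = np.array([b'a  ', b'bc '], dtype='S3')
#     >>> _rstrip_inplace(a)
#     array([b'a', b'bc'], dtype='|S3')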
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, 'compute'):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
f6066757f138dabd0a2e02ab9f3974a2a995a2a4c6c4b6a1738e02f2ca5d1c68 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from .util import _str_to_num, _is_int, translate, _words_group
from .verify import _Verify, _ErrList, VerifyError, VerifyWarning
from . import conf
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Card', 'Undefined']
FIX_FP_TABLE = str.maketrans('de', 'DE')
FIX_FP_TABLE2 = str.maketrans('dD', 'eE')
CARD_LENGTH = 80
BLANK_CARD = ' ' * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = '= ' # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = '=' # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r'^[A-Z0-9_-]{0,%d}$' % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r'^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$',
re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?'
_digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?'
_numr_FSC = r'[+-]?' + _digits_FSC
_numr_NFSC = r'[+-]? *' + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf'(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})')
_number_NFSC_RE = \
re.compile(rf'(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})')
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r'\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )'
_comm_field = r'(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))'
_strg_comment_RE = re.compile(f'({_strg})? *{_comm_field}?')
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r'[ -~]*\Z')
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
    # comment may be an empty string.
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$')
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$')
_rvkc_identifier = r'[a-zA-Z_]\w*'
_rvkc_field = _rvkc_identifier + r'(\.\d+)?'
_rvkc_field_specifier_s = fr'{_rvkc_field}(\.{_rvkc_field})*'
_rvkc_field_specifier_val = (r'(?P<keyword>{}): +(?P<val>{})'.format(
_rvkc_field_specifier_s, _numr_FSC))
_rvkc_keyword_val = fr'\'(?P<rawval>{_rvkc_field_specifier_val})\''
_rvkc_keyword_val_comm = (r' *{} *(/ *(?P<comm>[ -~]*))?$'.format(
_rvkc_keyword_val))
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + '$')
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = (
re.compile(r'(?P<keyword>{})\.(?P<field_specifier>{})$'.format(
_rvkc_identifier, _rvkc_field_specifier_s)))
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {'', 'COMMENT', 'HISTORY', 'END'}
_special_keywords = _commentary_keywords.union(['CONTINUE'])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and 'key' in kwargs:
keyword = kwargs['key']
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
        # If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (keyword is not None and value is not None and
self._check_if_rvkc(keyword, value)):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ''
return ''
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError(
'Once set, the Card keyword may not be modified')
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if (len(keyword) <= KEYWORD_LENGTH and
self._keywd_FSC_RE.match(keyword_upper)):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == 'END':
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == 'HIERARCH ':
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
'Keyword name {!r} is greater than 8 characters or '
'contains characters not allowed by the FITS '
'standard; a HIERARCH card will be created.'.format(
keyword), VerifyWarning)
else:
raise ValueError(f'Illegal keyword name: {keyword!r}.')
self._keyword = keyword
self._modified = True
else:
raise ValueError(f'Keyword name {keyword!r} is not a string.')
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == '':
self._value = value = ''
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
                'The value of invalid/unparsable cards cannot be set. Either '
'delete this card from the header or replace it.')
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(value,
(str, int, float, complex, bool, Undefined,
np.floating, np.integer, np.complexfloating,
np.bool_)):
raise ValueError(f'Illegal value: {value!r}.')
if isinstance(value, (float, np.float32)) and (np.isnan(value) or
np.isinf(value)):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError("Floating point {!r} values are not allowed "
"in FITS headers.".format(value))
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
'FITS header values must contain standard printable ASCII '
'characters; {!r} contains characters not representable in '
'ASCII or non-printable characters.'.format(value))
elif isinstance(value, np.bool_):
value = bool(value)
if (conf.strip_header_whitespace and
(isinstance(oldvalue, str) and isinstance(value, str))):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = (oldvalue != value or
not isinstance(value, type(oldvalue)))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f'value {self._value} is not a float')
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
                'The value of invalid/unparsable cards cannot be deleted. '
'Either delete this card from the header or replace it.')
if not self.field_specifier:
self.value = ''
else:
raise AttributeError('Values cannot be deleted from record-valued '
'keyword cards')
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split('.', 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f'{self.field_specifier}: {self.value}'
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ''
return ''
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
                'The comment of invalid/unparsable cards cannot be set. Either '
'delete this card from the header or replace it.')
if comment is None:
comment = ''
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
'FITS header comments must contain standard printable '
'ASCII characters; {!r} contains characters not '
'representable in ASCII or non-printable characters.'
.format(comment))
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ''
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
                'The comment of invalid/unparsable cards cannot be deleted. '
'Either delete this card from the header or replace it.')
self.comment = ''
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
        # Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError('The field-specifier may not be blank in '
'record-valued keyword cards.')
elif not self.field_specifier:
raise AttributeError('Cannot coerce cards to be record-valued '
'keyword cards by setting the '
'field_specifier attribute')
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword need also be updated
keyword = self._keyword.split('.', 1)[0]
self._keyword = '.'.join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError('The field_specifier attribute may not be '
'deleted from record-valued keyword cards.')
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify('fix+warn')
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (not self.keyword and
(isinstance(self.value, str) and not self.value) and
not self.comment)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode('latin1')
card._image = _pad(image)
card._verified = False
return card
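    # Illustrative sketch of parsing a raw card image (the image is padded to
    # 80 characters automatically):
    #
    #     >>> c = Card.fromstring('SIMPLE  =                    T / conforms to FITS')
    #     >>> c.keyword, c.value, c.comment
    #     ('SIMPLE', True, 'conforms to FITS')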
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
        keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if (len(keyword) <= KEYWORD_LENGTH and
cls._keywd_FSC_RE.match(keyword)):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return '.'.join((match.group('keyword').strip().upper(),
match.group('field_specifier')))
elif len(keyword) > 9 and keyword[:9].upper() == 'HIERARCH ':
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
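    # Illustrative sketch of keyword normalization (the field-specifier of a
    # record-valued keyword keeps its original case):
    #
    #     >>> Card.normalize_keyword('naxis')
    #     'NAXIS'
    #     >>> Card.normalize_keyword('HIERARCH ESO INS MODE')
    #     'ESO INS MODE'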
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
        intended as a RVKC) and the second as the card value; alternatively,
        the first argument may be the base keyword and the second the
        ``'field-specifier: value'`` string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(match.group('keyword'),
match.group('field_specifier'), None, value)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(': ') > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(keyword, match.group('keyword'), value,
match.group('val'))
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN:]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(': ') < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(keyword, match.group('keyword'),
match.group('rawval'), match.group('val'))
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = '.'.join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (keyword_upper == 'HIERARCH' and self._image[8] == ' ' and
HIERARCH_VALUE_INDICATOR in self._image):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN:]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
'The following header keyword is invalid or follows an '
'unrecognized non-standard convention:\n{}'
.format(self._image), AstropyUserWarning)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError("Unparsable card ({}), fix it first with "
".verify('fix').".format(self.keyword))
if m.group('bool') is not None:
value = m.group('bool') == 'T'
elif m.group('strg') is not None:
value = re.sub("''", "'", m.group('strg'))
elif m.group('numr') is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group('numr'))
digt = translate(numr.group('digt'), FIX_FP_TABLE2, ' ')
if numr.group('sign') is None:
sign = ''
else:
sign = numr.group('sign')
value = _str_to_num(sign + digt)
elif m.group('cplx') is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group('real'))
rdigt = translate(real.group('digt'), FIX_FP_TABLE2, ' ')
if real.group('sign') is None:
rsign = ''
else:
rsign = real.group('sign')
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group('imag'))
idigt = translate(imag.group('digt'), FIX_FP_TABLE2, ' ')
if imag.group('sign') is None:
isign = ''
else:
isign = imag.group('sign')
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group('valu')
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ''
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ''
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group('comm'):
comment = m.group('comm').rstrip()
elif '/' in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split('/', 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group('strg') or ''
value = value.rstrip().replace("''", "'")
if value and value[-1] == '&':
value = value[:-1]
values.append(value)
comment = m.group('comm')
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = ''.join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(' ', 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != 'HIERARCH ':
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split('.', 1)
self._keyword = '.'.join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split('/', 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group('numr') is not None:
numr = self._number_NFSC_RE.match(m.group('numr'))
value = translate(numr.group('digt'), FIX_FP_TABLE, ' ')
if numr.group('sign') is not None:
value = numr.group('sign') + value
elif m.group('cplx') is not None:
real = self._number_NFSC_RE.match(m.group('real'))
rdigt = translate(real.group('digt'), FIX_FP_TABLE, ' ')
if real.group('sign') is not None:
rdigt = real.group('sign') + rdigt
imag = self._number_NFSC_RE.match(m.group('imag'))
idigt = translate(imag.group('digt'), FIX_FP_TABLE, ' ')
if imag.group('sign') is not None:
idigt = imag.group('sign') + idigt
value = f'({rdigt}, {idigt})'
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
return '{:{len}}'.format(self.keyword.split('.', 1)[0],
len=KEYWORD_LENGTH)
elif self._hierarch:
return f'HIERARCH {self.keyword} '
else:
return '{:{len}}'.format(self.keyword, len=KEYWORD_LENGTH)
else:
return ' ' * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (self._valuestring and not self._valuemodified and
isinstance(self.value, float_types)):
# Keep the existing formatting for float/complex numbers
value = f'{self._valuestring:>20}'
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ''
else:
return f' / {self._comment}'
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ''
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ''
# put all parts together
output = ''.join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if (keywordvalue_length > self.length and
keyword.startswith('HIERARCH')):
if (keywordvalue_length == self.length + 1 and keyword[-1] == ' '):
output = ''.join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError('The header keyword {!r} with its value is '
'too long'.format(self.keyword))
if len(output) <= self.length:
output = f'{output:80}'
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if (isinstance(self.value, str) and
len(value) > (self.length - 10)):
output = self._format_long_image()
else:
warnings.warn('Card is too long, comment will be truncated.',
VerifyWarning)
output = output[:Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = '{:{len}}= '.format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = 'CONTINUE '
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f'{headstr + value:80}')
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f'{comment:80}')
return ''.join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
will render the card as multiple consecutive commentary card of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx:idx + maxlen])))
idx += maxlen
return ''.join(output)
def _verify(self, option='warn'):
errs = []
fix_text = f'Fixed {self.keyword!r} card to meet the FITS standard.'
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if (self.keyword not in self._commentary_keywords and
(self._image and self._image[:9].upper() != 'HIERARCH ' and
self._image.find('=') != 8)):
errs.append(dict(
err_text='Card {!r} is not FITS standard (equal sign not '
'at column 8).'.format(self.keyword),
fix_text=fix_text,
fix=self._fix_value))
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate the fix back to the keyword.
if ((self._image and self._image[:8].upper() == 'HIERARCH') or
self._hierarch):
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(dict(
err_text=f'Card keyword {keyword!r} is not upper case.',
fix_text=fix_text,
fix=self._fix_keyword))
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split('.', 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(dict(
err_text=f'Illegal keyword name {keyword!r}',
fixable=False))
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(dict(
err_text='Unprintable string {!r}; commentary cards may '
'only contain printable ASCII characters'.format(
valuecomment),
fixable=False))
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(dict(
err_text=f'Card {self.keyword!r} is not FITS standard '
f'(invalid value string: {valuecomment!r}).',
fix_text=fix_text,
fix=self._fix_value))
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group('comm')
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(dict(
err_text=f'Unprintable string {comment!r}; header '
'comments may only contain printable '
'ASCII characters',
fixable=False))
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
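# The stored image is a concatenation of fixed-length (80 character) card
# images; parse each 80-character chunk as its own card.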
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx:idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
'Long card images must have CONTINUE cards after '
'the first card or have commentary keywords like '
'HISTORY or COMMENT.')
if not isinstance(card.value, str):
raise VerifyError('CONTINUE cards must have string values.')
yield card
def _int_or_float(s):
"""
Converts a string to an int if possible, or to a float.
If the string cannot be converted to an int or a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
# string value should occupy at least 8 columns, unless it is
# a null string
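# (the FITS fixed-format convention pads character strings to at least
# 8 characters and the value field to 20 columns)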
if isinstance(value, str):
if value == '':
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f'{val_str:20}'
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f'{repr(value)[0]:>20}' # T or F
elif _is_int(value):
return f'{value:>20d}'
elif isinstance(value, (float, np.floating)):
return f'{_format_float(value):>20}'
elif isinstance(value, (complex, np.complexfloating)):
val_str = f'({_format_float(value.real)}, {_format_float(value.imag)})'
return f'{val_str:>20}'
elif isinstance(value, Undefined):
return ''
else:
return ''
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = f'{value:.16G}'
if '.' not in value_str and 'E' not in value_str:
value_str += '.0'
elif 'E' in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split('E')
if exponent[0] in ('+', '-'):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ''
value_str = f'{significand}E{sign}{int(exponent):02d}'
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find('E')
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[:20 - (str_len - idx)] + value_str[idx:]
return value_str
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + ' ' * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + ' ' * (Card.length - strlen)
|
bf3ca465363633ee6ef65cdf7f103fde34a787f1413937a4d4321de7c965512d | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import operator
import warnings
from astropy.utils import indent
from astropy.utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix',
'fix+ignore', 'fix+warn', 'fix+exception',
'silentfix+ignore', 'silentfix+warn', 'silentfix+exception']
class _Verify:
"""
Shared methods for verification.
"""
def run_option(self, option='warn', err_text='', fix_text='Fixed.',
fix=None, fixable=True):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ['warn', 'exception']:
fixable = False
# fix the value
elif not fixable:
text = f'Unfixable error: {text}'
else:
if fix:
fix()
text += ' ' + fix_text
return (fixable, text)
def verify(self, option='warn'):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError(f'Option {option!r} not recognized.')
if opt == 'ignore':
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if '+' in opt:
fix_opt, report_opt = opt.split('+')
elif opt in ['fix', 'silentfix']:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, 'exception'
else:
fix_opt, report_opt = None, opt
if fix_opt == 'silentfix' and report_opt == 'ignore':
# Fixable errors were fixed, but don't report anything
return
if fix_opt == 'silentfix':
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == 'fix' and report_opt == 'ignore':
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, 'Verification reported errors:')
messages.append('Note: astropy.io.fits uses zero-based indexing.\n')
if fix_opt == 'silentfix' and not unfixable:
return
elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError('\n' + '\n'.join(messages))
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __init__(self, val=(), unit='Element'):
super().__init__(val)
self.unit = unit
def __str__(self):
return '\n'.join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
# go through the list twice, first time print out all top level
# messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
# second time go through the next level items, each of the next level
# must be present, even if it has nothing.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent(f'{self.unit} {element}:',
shift=shift)
yield first_line
for line in next_lines:
yield line
element += 1
|
af2877a87be49457b52ea659f070d9c520d6f15e69ecc9c31216c6b739c40926 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
cds.py:
Classes to read CDS / Vizier table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import fnmatch
import itertools
import re
import os
from contextlib import suppress
from . import core
from . import fixedwidth
from astropy.units import Unit
__doctest_skip__ = ['*']
class CdsHeader(core.BaseHeader):
_subfmt = 'CDS'
col_type_map = {'e': core.FloatType,
'f': core.FloatType,
'i': core.IntType,
'a': core.StrType}
'The ReadMe file to construct header from.'
readme = None
def get_type_map_key(self, col):
match = re.match(r'\d*(\S)', col.raw_type.lower())
if not match:
raise ValueError('Unrecognized {} format "{}" for column "{}"'.format(
self._subfmt, col.raw_type, col.name))
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if line.startswith(('------', '=======')):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(r'Byte-by-byte Description of file: (?P<name>.+)$',
line, re.IGNORECASE)
if match:
# Split 'name' in case it contains multiple files
names = [s for s in re.split('[, ]+', match.group('name'))
if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(self.data.table_name, pattern):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError("Can't find table {} in {}".format(
self.data.table_name, self.readme))
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r'Byte-by-byte Description', line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE)
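# Example of a line this regex is meant to match, taken from a
# Byte-by-byte description section:
#    1-  3 I3    ---     Index   Running identification number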
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if line.startswith(('------', '=======')):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group('name'))
col.start = int(re.sub(r'[-\s]', '',
match.group('start') or match.group('end'))) - 1
col.end = int(match.group('end'))
unit = match.group('units')
if unit == '---':
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
col.unit = Unit(unit, format='cds', parse_strict='warn')
col.description = (match.group('descr') or '').strip()
col.raw_type = match.group('format')
col.type = self.get_col_type(col)
match = re.match(
r'(?P<limits>[\[\]] \S* [\[\]])?' # Matches limits specifier (eg [])
# that may or may not be present
r'\?' # Matches '?' directly
r'((?P<equal>=)(?P<nullval> \S*))?' # Matches to nullval if and only
# if '=' is present
r'(?P<order>[-+]?[=]?)' # Matches to order specifier:
# ('+', '-', '+=', '-=')
r'(\s* (?P<descriptiontext> \S.*))?', # Matches description text even
# if no whitespace is
# present after '?'
col.description, re.VERBOSE)
if match:
col.description = (match.group('descriptiontext') or '').strip()
if issubclass(col.type, core.FloatType):
fillval = 'nan'
else:
fillval = '0'
if match.group('nullval') == '-':
col.null = '---'
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(('-' * i, fillval, col.name))
else:
col.null = match.group('nullval')
if (col.null is None):
col.null = ''
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
class CdsData(core.BaseData):
"""CDS table data reader
"""
_subfmt = 'CDS'
splitter_class = fixedwidth.FixedWidthSplitter
def process_lines(self, lines):
"""Skip over CDS/MRT header by finding the last section delimiter"""
# If the header has a ReadMe and data has a filename
# then no need to skip, as the data lines do not have header
# info. The ``read`` method adds the table_name to the ``data``
# attribute.
if self.header.readme and self.table_name:
return lines
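# Otherwise the byte-by-byte description is embedded in ``lines``; the data
# rows start after the last delimiter line of dashes or equals signs.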
i_sections = [i for i, x in enumerate(lines)
if x.startswith(('------', '======='))]
if not i_sections:
raise core.InconsistentTableError(f'No {self._subfmt} section delimiter found')
return lines[i_sections[-1]+1:] # noqa
class Cds(core.BaseReader):
"""CDS format table.
See: http://vizier.u-strasbg.fr/doc/catstd.htx
Example::
Table: Table name here
= ==============================================================================
Catalog reference paper
Bibliography info here
================================================================================
ADC_Keywords: Keyword ; Another keyword ; etc
Description:
Catalog description here.
================================================================================
Byte-by-byte Description of file: datafile3.txt
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 3 I3 --- Index Running identification number
5- 6 I2 h RAh Hour of Right Ascension (J2000)
8- 9 I2 min RAm Minute of Right Ascension (J2000)
11- 15 F5.2 s RAs Second of Right Ascension (J2000)
--------------------------------------------------------------------------------
Note (1): A CDS file can contain sections with various metadata.
Notes can be multiple lines.
Note (2): Another note.
--------------------------------------------------------------------------------
1 03 28 39.09
2 04 18 24.11
**About parsing the CDS format**
The CDS format consists of a table description and the table data. These
can be in separate files as a ``ReadMe`` file plus data file(s), or
combined in a single file. Different subsections within the description
are separated by lines of dashes or equal signs ("------" or "======").
The table which specifies the column information must be preceded by a line
starting with "Byte-by-byte Description of file:".
In the case where the table description is combined with the data values,
the data must be in the last section and must be preceded by a section
delimiter line (dashes or equal signs only).
**Basic usage**
Use the ``ascii.read()`` function as normal, with an optional ``readme``
parameter indicating the CDS ReadMe file. If not supplied it is assumed that
the header information is at the top of the given table. Examples::
>>> from astropy.io import ascii
>>> table = ascii.read("data/cds.dat")
>>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
>>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
>>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")
The table name and the CDS ReadMe file can be entered as URLs. This can be used
to directly load tables from the Internet. For example, Vizier tables from the
CDS::
>>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")
If the header (ReadMe) and data are stored in a single file and there
is content between the header and the data (for instance Notes), then the
parsing process may fail. In this case you can instruct the reader to
guess the actual start of the data by supplying ``data_start='guess'`` in the
call to the ``ascii.read()`` function. You should verify that the output
data table matches expectation based on the input CDS file.
**Using a reader object**
When ``Cds`` reader object is created with a ``readme`` parameter
passed to it at initialization, then when the ``read`` method is
executed with a table filename, the header information for the
specified table is taken from the ``readme`` file. An
``InconsistentTableError`` is raised if the ``readme`` file does not
have header information for the given table.
>>> readme = "data/vizier/ReadMe"
>>> r = ascii.get_reader(ascii.Cds, readme=readme)
>>> table = r.read("data/vizier/table1.dat")
>>> # table5.dat has the same ReadMe file
>>> table = r.read("data/vizier/table5.dat")
If no ``readme`` parameter is specified, then the header
information is assumed to be at the top of the given table.
>>> r = ascii.get_reader(ascii.Cds)
>>> table = r.read("data/cds.dat")
>>> #The following gives InconsistentTableError, since no
>>> #readme file was given and table1.dat does not have a header.
>>> table = r.read("data/vizier/table1.dat")
Traceback (most recent call last):
...
InconsistentTableError: No CDS section delimiter found
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = 'cds'
_io_registry_format_aliases = ['cds']
_io_registry_can_write = False
_description = 'CDS format table'
data_class = CdsData
header_class = CdsHeader
def __init__(self, readme=None):
super().__init__()
self.header.readme = readme
def write(self, table=None):
"""Not available for the CDS class (raises NotImplementedError)"""
raise NotImplementedError
def read(self, table):
# If the read kwarg `data_start` is 'guess' then the table may have extraneous
# lines between the end of the header and the beginning of data.
if self.data.start_line == 'guess':
# Replicate the first part of BaseReader.read up to the point where
# the table lines are initially read in.
with suppress(TypeError):
# For strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
self.data.header = self.header
self.header.data = self.data
# Get a list of the lines (rows) in the table
lines = self.inputter.get_lines(table)
# Now try increasing data.start_line by one until the table reads successfully.
# For efficiency use the in-memory list of lines instead of `table`, which
# could be a file.
for data_start in range(len(lines)):
self.data.start_line = data_start
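# A failed read just means this start line was wrong; silently move on and
# try the next candidate.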
with suppress(Exception):
table = super().read(lines)
return table
else:
return super().read(table)
|
9e8ca0b1f63d9f8778c0355597e8558234d43a8f7246be805a1e1ca2fd702fe9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
basic.py:
Basic table read / write functionality for simple character
delimited files with various options for column header definition.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class BasicHeader(core.BaseHeader):
"""
Basic table Header Reader
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r'\s*#'
write_comment = '# '
class BasicData(core.BaseData):
"""
Basic table Data Reader
Set a few defaults for common ascii table formats
(start at line 1, comments begin with ``#`` and possibly white space)
"""
start_line = 1
comment = r'\s*#'
write_comment = '# '
class Basic(core.BaseReader):
r"""Character-delimited table with a single header line at the top.
Lines beginning with a comment character (default='#') as the first
non-whitespace character are comments.
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = 'basic'
_description = 'Basic table with custom delimiters'
_io_registry_format_aliases = ['ascii']
header_class = BasicHeader
data_class = BasicData
class NoHeaderHeader(BasicHeader):
"""
Reader for table header without a header
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
class NoHeaderData(BasicData):
"""
Reader for table data without a header
Data starts at first uncommented line since there is no header line.
"""
start_line = 0
class NoHeader(Basic):
"""Character-delimited table with no header line.
When reading, columns are autonamed using header.auto_format which defaults
to "col%d". Otherwise this reader the same as the :class:`Basic` class
from which it is derived. Example::
# Table data
1 2 "hello there"
3 4 world
"""
_format_name = 'no_header'
_description = 'Basic table with no headers'
header_class = NoHeaderHeader
data_class = NoHeaderData
class CommentedHeaderHeader(BasicHeader):
"""
Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""
Return only lines that start with the comment regexp. For these
lines strip out the matching characters.
"""
re_comment = re.compile(self.comment)
for line in lines:
match = re_comment.match(line)
if match:
yield line[match.end():]
def write(self, lines):
lines.append(self.write_comment + self.splitter.join(self.colnames))
class CommentedHeader(Basic):
"""Character-delimited table with column names in a comment line.
When reading, ``header_start`` can be used to specify the
line index of column names, and it can be a negative index (for example -1
for the last commented line). The default delimiter is the <space>
character.
This matches the format produced by ``np.savetxt()``, with ``delimiter=','``,
and ``header='<comma-delimited-column-names-list>'``.
Example::
# col1 col2 col3
# Comment line
1 2 3
4 5 6
"""
_format_name = 'commented_header'
_description = 'Column names in a commented line'
header_class = CommentedHeaderHeader
data_class = NoHeaderData
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# Strip off the comment line set as the header line for
# commented_header format (first by default).
if 'comments' in out.meta:
idx = self.header.start_line
if idx < 0:
idx = len(out.meta['comments']) + idx
out.meta['comments'] = out.meta['comments'][:idx] + out.meta['comments'][idx + 1:]
if not out.meta['comments']:
del out.meta['comments']
return out
def write_header(self, lines, meta):
"""
Write comment lines after, rather than before, the header.
"""
self.header.write(lines)
self.header.write_comments(lines, meta)
class TabHeaderSplitter(core.DefaultSplitter):
"""Split lines on tab and do not remove whitespace"""
delimiter = '\t'
def process_line(self, line):
return line + '\n'
class TabDataSplitter(TabHeaderSplitter):
"""
Don't strip data value whitespace since that is significant in TSV tables
"""
process_val = None
skipinitialspace = False
class TabHeader(BasicHeader):
"""
Reader for header of tables with tab separated header
"""
splitter_class = TabHeaderSplitter
class TabData(BasicData):
"""
Reader for data of tables with tab separated data
"""
splitter_class = TabDataSplitter
class Tab(Basic):
"""Tab-separated table.
Unlike the :class:`Basic` reader, whitespace is not stripped from the
beginning and end of either lines or individual column values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = 'tab'
_description = 'Basic table with tab-separated values'
header_class = TabHeader
data_class = TabData
class CsvSplitter(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables
"""
delimiter = ','
class CsvHeader(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
class CsvData(BasicData):
"""
Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
fill_values = [(core.masked, '')]
comment = None
write_comment = None
class Csv(Basic):
"""CSV (comma-separated-values) table.
This file format may contain rows with fewer entries than the number of
columns, a situation that occurs in output from some spreadsheet editors.
The missing entries are marked as masked in the output table.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
Since the `CSV format <https://tools.ietf.org/html/rfc4180>`_ does not
formally support comments, any comments defined for the table via
``tbl.meta['comments']`` are ignored by default. If you would still like to
write those comments then include a keyword ``comment='#'`` to the
``write()`` call.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = 'csv'
_io_registry_format_aliases = ['csv']
_io_registry_can_write = True
_io_registry_suffix = '.csv'
_description = 'Comma-separated-values'
header_class = CsvHeader
data_class = CsvData
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [''])
return str_vals
class RdbHeader(TabHeader):
"""
Header for RDB tables
"""
col_type_map = {'n': core.NumType,
's': core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError('RDB header requires 2 lines')
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise core.InconsistentTableError(
'RDB header mismatch between number of column names and column types.')
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types):
raise core.InconsistentTableError(
f'RDB types definitions do not all match [num](N|S): {raw_types}')
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N'
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
class RdbData(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
class Rdb(Tab):
"""Tab-separated file with an extra line after the column definition line that
specifies either numeric (N) or string (S) data.
See: https://www.drdobbs.com/rdb-a-unix-command-line-database/199101326
Example::
col1 <tab> col2 <tab> col3
N <tab> S <tab> N
1 <tab> 2 <tab> 5
"""
_format_name = 'rdb'
_io_registry_format_aliases = ['rdb']
_io_registry_suffix = '.rdb'
_description = 'Tab-separated with a type definition header line'
header_class = RdbHeader
data_class = RdbData
|
35ae810d766cb33a4b2bb458d876d9e47221f20e65139607c2398308799cf8f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class
import re
from astropy.io import registry as io_registry # noqa
from astropy.table import Table
__all__ = []
def io_read(format, filename, **kwargs):
from .ui import read
if format != 'ascii':
format = re.sub(r'^ascii\.', '', format)
kwargs['format'] = format
return read(filename, **kwargs)
def io_write(format, table, filename, **kwargs):
from .ui import write
if format != 'ascii':
format = re.sub(r'^ascii\.', '', format)
kwargs['format'] = format
return write(table, filename, **kwargs)
def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(suffix)
def _get_connectors_table():
from .core import FORMAT_CLASSES
rows = []
rows.append(('ascii', '', 'Yes', 'ASCII table in any supported format (uses guessing)'))
for format in sorted(FORMAT_CLASSES):
cls = FORMAT_CLASSES[format]
io_format = 'ascii.' + cls._format_name
description = getattr(cls, '_description', '')
class_link = f':class:`~{cls.__module__}.{cls.__name__}`'
suffix = getattr(cls, '_io_registry_suffix', '')
can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else ''
rows.append((io_format, suffix, can_write,
f'{class_link}: {description}'))
out = Table(list(zip(*rows)), names=('Format', 'Suffix', 'Write', 'Description'))
for colname in ('Format', 'Description'):
width = max(len(x) for x in out[colname])
out[colname].format = f'%-{width}s'
return out
|
463f9281a8aa4912821c33a93d6df516655605807d14d93b67966366481757f4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
import inspect
import fnmatch
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(f'column(s) with dimension > {max_ndim} '
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format")
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
# If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline :
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
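# ``table + ''`` below raises TypeError unless ``table`` is a string, so
# non-string (list-like) input falls through to the except clause.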
if (hasattr(table, 'read')
or ('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + '\n'
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(' \t')
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip('\r\n')
return out
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
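# 'NONE' is a placeholder that can never compare equal to a real
# single-character escapechar.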
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f'got column type {type(col)} instead of required '
f'{Column}')
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError(
f'Column name {name!r} does not meet strict name requirements')
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if guessing and len(self.colnames) <= 1 and self.__class__.__name__ != 'EcsvHeader':
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
'Length of names argument ({}) does not match number'
' of table columns ({})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value
In the first step it is determined which columns each entry in ``fill_values``
applies to, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData
fill values has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
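# (the concatenation below succeeds only if fill_values[0] is the
# <bad_value> string of a single spec rather than a tuple from a list of
# specs)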
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, '__call__'):
raise TypeError('Start_line attribute cannot be callable for write()')
else:
data_start_line = self.start_line or 0
while len(lines) < data_start_line:
lines.append(itertools.cycle(self.write_spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False')
| (svals == 'True')
| (svals == '0')
| (svals == '1')):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == 'True') | (vals == '1')
falses = (vals == 'False') | (vals == '0')
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
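# Illustrative sketch (comment only, not executed at import time) of using the
# converter pair returned by ``convert_numpy`` directly:
#
#     converter, ctype = convert_numpy(numpy.float64)
#     arr = converter(['1.5', '2.5'])   # -> numpy.array([1.5, 2.5]), dtype float64
#     ctype is FloatType                # -> True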
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
converters_out = []
try:
for converter in converters:
converter_func, converter_type = converter
if not issubclass(converter_type, NoType):
raise ValueError()
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError):
raise ValueError('Error: invalid format for converters, see '
'documentation\n{}'.format(converters))
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f'Column {col.name} failed to convert: {last_err}')
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
warnings.warn(
"OverflowError converting to {} in column {}, reverting to String."
.format(converter_type.__name__, col.name), AstropyWarning)
col.converters.insert(0, convert_numpy(str))
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + '_'
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
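# Illustrative example (not a doctest): duplicate names get an increasing
# ``_<N>`` suffix, so
#
#     _deduplicate_names(['a', 'b', 'a', 'a'])  ->  ['a', 'b', 'a_1', 'a_2']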
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int),
convert_numpy(float),
convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, 'mask') and numpy.any(x.mask)
else x.data for x in cols]
out = Table(t_cols, names=[x.name for x in cols], meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(READ_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(WRITE_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
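# Illustrative example: _is_number('1.5e3') -> True, _is_number('abc') -> False.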
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
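# Illustrative sketch (hypothetical table): for a table ``tbl`` with columns
# ['a', 'b', 'c'],
#
#     _apply_include_exclude_names(tbl, names=None,
#                                  include_names=['a', 'b'], exclude_names=['b'])
#
# leaves only column 'a' in ``tbl`` (``exclude_names`` is applied after
# ``include_names``).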
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is treated as a file name if it is a string
# that does not contain the OS-specific newline character.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == '\n':
newline = '\r'
elif self.header.splitter.delimiter == '\r':
newline = '\n'
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
# Set self.data.data_lines to a slice of lines contain the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
_apply_include_exclude_names(self.header, self.names,
self.include_names, self.exclude_names)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
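# Illustrative sketch (the class name is hypothetical) of a subclass that pads
# or truncates inconsistent rows instead of raising:
#
#     class PaddingReader(BaseReader):
#         def inconsistent_handler(self, str_vals, ncols):
#             # Pad short rows with empty strings, truncate long rows.
#             return (str_vals + [''] * (ncols - len(str_vals)))[:ncols]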
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
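# Illustrative example: with the default settings a line ending in the
# continuation character, e.g. ``1 \`` followed by ``2 3``, is emitted as the
# single line ``1  2 3`` (the backslash is replaced by ``replace_char`` before
# the two lines are concatenated).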
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None
or lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
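# Illustrative example (tabs written as \t): tabs outside quoted substrings are
# converted to spaces while tabs inside quotes are preserved, e.g.
#
#     WhitespaceSplitter().process_line('a\tb "c\td"')  ->  'a b "c\td"'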
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with '
'{}, but this is not a fast C reader: {}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars)
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if 'delimiter' in kwargs:
if kwargs['delimiter'] in ('\n', '\r', '\r\n'):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if None in reader.names:
raise TypeError('Cannot have None for column name')
if len(set(reader.names)) != len(reader.names):
raise ValueError('Duplicate column names')
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module."""
from .fastbasic import FastBasic
# A value of None for fill_values implies getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f'fast_{Writer._format_name}' in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES[f'fast_{Writer._format_name}'](**kwargs)
writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars)
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip', ' \t')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
|
f9abd13f80318eeb8570e8aa23b9f475f1d9f423b7725ca34a6a814acb3a7e7e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
"""
# flake8: noqa
from .core import (InconsistentTableError,
ParameterError,
NoType, StrType, NumType, FloatType, IntType, AllType,
Column,
BaseInputter, ContinuationLinesInputter,
BaseHeader,
BaseData,
BaseOutputter, TableOutputter,
BaseReader,
BaseSplitter, DefaultSplitter, WhitespaceSplitter,
convert_numpy,
masked
)
from .basic import (Basic, BasicHeader, BasicData,
Rdb,
Csv,
Tab,
NoHeader,
CommentedHeader)
from .fastbasic import (FastBasic,
FastCsv,
FastTab,
FastNoHeader,
FastCommentedHeader,
FastRdb)
from .cds import Cds
from .mrt import Mrt
from .ecsv import Ecsv
from .latex import Latex, AASTex, latexdicts
from .html import HTML
from .ipac import Ipac
from .daophot import Daophot
from .qdp import QDP
from .sextractor import SExtractor
from .fixedwidth import (FixedWidth, FixedWidthNoHeader,
FixedWidthTwoLine, FixedWidthSplitter,
FixedWidthHeader, FixedWidthData)
from .rst import RST
from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace)
from . import connect
|
60c39b976b696add435ca480b412aead0f879f001c954bc9a00411e6ec8b4c08 | # Licensed under a 3-clause BSD style license
"""
:Author: Simon Gibbons ([email protected])
"""
from .core import DefaultSplitter
from .fixedwidth import (FixedWidth,
FixedWidthData,
FixedWidthHeader,
FixedWidthTwoLineDataSplitter)
class SimpleRSTHeader(FixedWidthHeader):
position_line = 0
start_line = 1
splitter_class = DefaultSplitter
position_char = '='
def get_fixedwidth_params(self, line):
vals, starts, ends = super().get_fixedwidth_params(line)
# The right hand column can be unbounded
ends[-1] = None
return vals, starts, ends
class SimpleRSTData(FixedWidthData):
start_line = 3
end_line = -1
splitter_class = FixedWidthTwoLineDataSplitter
class RST(FixedWidth):
"""reStructuredText simple format table.
See: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#simple-tables
Example::
==== ===== ======
Col1 Col2 Col3
==== ===== ======
1 2.3 Hello
2 4.5 Worlds
==== ===== ======
Currently there is no support for reading tables which utilize continuation lines,
or for ones which define column spans through the use of an additional
line of dashes in the header.
"""
_format_name = 'rst'
_description = 'reStructuredText simple table'
data_class = SimpleRSTData
header_class = SimpleRSTHeader
def __init__(self):
super().__init__(delimiter_pad=None, bookend=False)
def write(self, lines):
lines = super().write(lines)
lines = [lines[1]] + lines + [lines[1]]
return lines
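# Illustrative usage sketch for this format through the unified Table I/O
# interface (values mirror the docstring example above):
#
#     import sys
#     from astropy.table import Table
#
#     tbl = Table({'Col1': [1, 2], 'Col2': [2.3, 4.5], 'Col3': ['Hello', 'Worlds']})
#     tbl.write(sys.stdout, format='ascii.rst')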
|
957c52dcd08315e7ce8093809628fe2ff1aeee41765e6c637bd22ed7e5311393 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from . import core
from .core import InconsistentTableError, DefaultSplitter
from . import basic
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
"""
delimiter_pad = ''
bookend = False
delimiter = '|'
def __call__(self, lines):
for line in lines:
vals = [line[x.start:x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ''
delimiter = self.delimiter or ''
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ''
bookend_right = ''
vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
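# Illustrative example of join() with the class defaults (delimiter='|',
# delimiter_pad='', bookend=False): each value is right-justified to its width,
#
#     FixedWidthSplitter().join(['1.2', 'hello'], [5, 7])  ->  '  1.2|  hello'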
class FixedWidthHeaderSplitter(DefaultSplitter):
'''Splitter class that splits on ``|``.'''
delimiter = '|'
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError('No header line found in table')
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(self.position_line, self.process_lines(lines))
# If start_line is none then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError("Cannot set position_line without also setting header_start")
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
'No data lines found so cannot autogenerate column names')
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i)
for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - set([self.splitter.delimiter, ' '])) != 1:
raise InconsistentTableError(
'Position line should only contain delimiters and '
'one other character, e.g. "--- ------- ---".')
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
set([self.splitter.delimiter, ' ']))
if not set(line).issubset(charset):
raise InconsistentTableError(
f'Characters in position line must be part of {charset}')
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the header column names and column positions
line = self.get_line(lines, start_line)
vals, starts, ends = self.get_fixedwidth_params(line)
self.names = vals
self._set_cols_from_names()
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError('Fixed width col_starts and col_ends must have the same length')
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError('Error parsing fixed width header')
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
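# Illustrative example (no col_starts/col_ends set): for the header line
# '| Col1 |  Col2 |' this returns
#
#     vals = ['Col1', 'Col2'], starts = [1, 8], ends = [7, 15]
#
# i.e. Python-slice positions of the text between the '|' delimiters.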
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
class FixedWidthData(basic.BasicData):
"""
Base table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
def write(self, lines):
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max([len(vals[i]) for vals in vals_list])
if self.header.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
if self.header.start_line is not None:
lines.append(self.splitter.join([col.info.name for col in self.cols],
widths))
if self.header.position_line is not None:
char = self.header.position_char
if len(char) != 1:
raise ValueError(f'Position_char="{char}" must be a single character')
vals = [char * col.width for col in self.cols]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width'
_description = 'Fixed width'
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
class FixedWidthNoHeaderHeader(FixedWidthHeader):
'''Header reader for fixed width tables with no header line'''
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
'''Data reader for fixed width tables with no header line'''
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
(``col_starts`` and ``col_stops`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width_no_header'
_description = 'Fixed width with no header'
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
super().__init__(col_starts, col_ends, delimiter_pad=delimiter_pad,
bookend=bookend)
class FixedWidthTwoLineHeader(FixedWidthHeader):
'''Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
'''
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
'''Splitter for fixed width tables splitting on ``' '``.'''
delimiter = ' '
class FixedWidthTwoLineData(FixedWidthData):
'''Data reader for fixed width tables with two header lines.'''
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = 'fixed_width_two_line'
_description = 'Fixed width with second header line'
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False):
super().__init__(delimiter_pad=delimiter_pad, bookend=bookend)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
|
ca686dc238db232132ebb3ede96cc6150001c607b144fc4228f8014022c6322e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import math
import warnings
import numpy as np
from io import StringIO
from . import core
from . import fixedwidth, cds
from astropy import units as u
from astropy.table import Table
from astropy.table import Column, MaskedColumn
from string import Template
from textwrap import wrap
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ['*']
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------"]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------"]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + ' ' * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
class MrtHeader(cds.CdsHeader):
_subfmt = 'MRT'
def _split_float_format(self, value):
"""
Splits a Float string into different parts to find number
of digits after decimal and check if the value is in Scientific
notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
fmt: (int, int, int, bool, bool)
List of values describing the Float string.
(size, ent, dec, sign, exp)
size, length of the given string.
ent, number of digits before decimal point.
dec, number of digits after decimal point.
sign, whether or not the given value is signed.
exp, is value in Scientific notation?
"""
regfloat = re.compile(r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE)
mo = regfloat.match(value)
if mo is None:
raise Exception(f'{value} is not a float number')
return (len(value),
len(mo.group('ent')),
len(mo.group('decimals')),
mo.group('sign') != "",
mo.group('exp') != "")
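# Illustrative examples:
#     _split_float_format('-12.345')  ->  (7, 2, 3, True, False)
#     _split_float_format('1.5e-4')   ->  (6, 1, 1, False, True)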
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
if isinstance(col.max, np.ma.core.MaskedConstant):
col.max = None
if isinstance(col.min, np.ma.core.MaskedConstant):
col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
by splitting the value string. It is assumed that the column either has
float values or Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
# maxsize: maximum length of string containing the float value.
# maxent: maximum number of digits places before decimal point.
# maxdec: maximum number of digits places after decimal point.
# maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = 'F'
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == '':
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == 'F':
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = 'E'
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == 'E':
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == 'E':
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ''
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith('0'):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = '0'
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
Columns that are `astropy.coordinates.SkyCoord` or `astropy.time.TimeSeries`
objects or columns with values that are such objects are recognized as such,
and some predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: http://vizier.u-strasbg.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max([len(vals[i]) for vals in vals_list])
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(names=['Bytes', 'Format', 'Units', 'Label', 'Explanations'],
dtype=[str] * 5)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max([len(sval) for sval in col.str_vals])
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
if getattr(col, 'formatted_width', None) is None: # If ``formats`` not passed.
col.formatted_width = int(re.search(r'(\d+)$', dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
# ``mixin`` columns converted to string valued columns will not have a name
# attribute. In those cases, an ``Unknown`` column label is used, indicating that
# such columns can be better formatted with some manipulation before calling
# the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
# of the values. Thus, the only way to check for if the column values
# have ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (col.min and col.max and
not any(x in col.name for x in ['RA', 'DE', 'LON', 'LAT', 'PLN', 'PLT'])):
# No col limit values for coordinate columns.
if col.fortran_format[0] == 'I':
if abs(col.min) < MAX_COL_INTLIMIT and abs(col.max) < MAX_COL_INTLIMIT:
if col.min == col.max:
lim_vals = "[{0}]".format(col.min)
else:
lim_vals = "[{0}/{1}]".format(col.min, col.max)
elif col.fortran_format[0] in ('E', 'F'):
lim_vals = "[{0}/{1}]".format(math.floor(col.min * 100) / 100.,
math.ceil(col.max * 100) / 100.)
if lim_vals != '' or nullflag != '':
description = "{0}{1} {2}".format(lim_vals, nullflag, description)
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == 'DEd':
bbb.add_row([singlebfmt.format(startb),
"A1", "---", "DE-",
"Sign of Declination"])
col.fortran_format = 'I2'
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row([singlebfmt.format(startb) if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description])
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(bbblines, format='ascii.fixed_width_no_header',
delimiter=' ', bookend=False, delimiter_pad=None,
formats={'Format': '<6s',
'Units': '<6s',
'Label': '<' + str(max_label_width) + 's',
'Explanations': '' + str(max_descrip_size) + 's'})
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(wrap(newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE))
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
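# Illustrative usage sketch: the Byte-By-Byte description above is generated
# automatically when a table is written in MRT format, e.g.
#
#     import sys
#     from astropy.table import Table
#
#     tbl = Table({'name': ['a', 'b'], 'flux': [1.25, 2.5]})
#     tbl.write(sys.stdout, format='ascii.mrt')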
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {'galactic': ('GLAT', 'GLON', 'b', 'l'),
'ecliptic': ('ELAT', 'ELON', 'lat', 'lon'), # 'geocentric*ecliptic'
'heliographic': ('HLAT', 'HLON', 'lat', 'lon'), # '_carrington|stonyhurst'
'helioprojective': ('HPLT', 'HPLN', 'Ty', 'Tx')}
eqtnames = ['RAh', 'RAm', 'RAs', 'DEd', 'DEm', 'DEs']
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
# columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
# If coordinates are given in RA/DEC, divide each them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if ('ra' in col.representation_component_names.keys() and
len(set(eqtnames) - set(self.colnames)) == 6):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [ra_c.h.round().astype('i1'), ra_c.m.round().astype('i1'), ra_c.s,
dec_c.d.round().astype('i1'), dec_c.m.round().astype('i1'), dec_c.s]
coord_units = [u.h, u.min, u.second,
u.deg, u.arcmin, u.arcsec]
coord_descrip = ['Right Ascension (hour)', 'Right Ascension (minute)',
'Right Ascension (second)', 'Declination (degree)',
'Declination (arcmin)', 'Declination (arcsec)']
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip):
# Have Sign of Declination only in the DEd column.
if name in ['DEm', 'DEs']:
coord_col = Column(list(np.abs(coord)), name=name,
unit=coord_unit, description=descrip)
else:
coord_col = Column(list(coord), name=name, unit=coord_unit,
description=descrip)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == 'RAs':
coord_col.format = '013.10f'
elif name == 'DEs':
coord_col.format = '012.9f'
elif name == 'RAh':
coord_col.format = '2d'
elif name == 'DEd':
coord_col.format = '+03d'
elif name.startswith(('RA', 'DE')):
coord_col.format = '02d'
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# For all other coordinate types, simply divide into two columns
# for latitude and longitude respectively, keeping the unit as it is.
else:
frminfo = ''
for frame, latlon in coord_systems.items():
if frame in col.name and len(set(latlon[:2]) - set(self.colnames)) == 2:
if frame != col.name:
frminfo = f' ({col.name})'
lon_col = Column(getattr(col, latlon[3]), name=latlon[1],
description=f'{frame.capitalize()} Longitude{frminfo}',
unit=col.representation_component_units[latlon[3]],
format='.12f')
lat_col = Column(getattr(col, latlon[2]), name=latlon[0],
description=f'{frame.capitalize()} Latitude{frminfo}',
unit=col.representation_component_units[latlon[2]],
format='+.12f')
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``SkyCoord`` columns that are not in the above three
# representations to string valued columns. Those could either be types not
# supported yet (e.g. 'helioprojective'), or already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(f"Coordinate system of type '{col.name}' already stored in table "
f"as CDS/MRT-syle columns or of unrecognized type. So column {i} "
f"is being skipped with designation of a string valued column "
f"`{self.colnames[i]}`.", UserWarning)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ['RAh', 'DEd', 'ELON', 'GLAT']):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
message = ('Table already has coordinate system in CDS/MRT-style columns. '
f'So column {i} should have been replaced already with '
f'a string valued column `{self.colnames[i]}`.')
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template('\n'.join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute({'file': 'table.dat',
'bytebybyte': self.write_byte_by_byte()})
# Fill up the full ReadMe
rm_template = Template('\n'.join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({'bytebybyte': byte_by_byte})
lines.append(readme_filled)
class MrtData(cds.CdsData):
"""MRT table data reader
"""
_subfmt = 'MRT'
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = ' '
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = 'mrt'
_io_registry_format_aliases = ['mrt']
_io_registry_can_write = True
_description = 'MRT format table'
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
# Writing an empty table is not yet supported.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
# Create a copy of the ``table``, so that the copy gets modified and
# written to the file, while the original table remains as it is.
table = table.copy()
return super().write(table)
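# --- Illustrative usage sketch (editorial addition, not part of astropy) ---
# The writer above decomposes a ``SkyCoord`` column into RAh/RAm/RAs and
# DEd/DEm/DEs component columns before writing. A minimal example, assuming
# astropy is installed; the function name is hypothetical.
def _example_write_mrt():
    import sys
    from astropy.table import Table
    from astropy.coordinates import SkyCoord
    from astropy.io import ascii
    import astropy.units as u

    t = Table()
    t['name'] = ['src1', 'src2']
    t['coord'] = SkyCoord([10.5, 120.0], [-45.0, 30.0], unit=u.deg)
    # Writes the ReadMe header (with the Byte-By-Byte description) followed
    # by the fixed-width data rows.
    ascii.write(t, sys.stdout, format='mrt')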
|
240424c0a450d343ca2cb47da07cbfddb50e5b0ab1c9085c63847473837c2000 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {'AA': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline',
'data_end': r'\hline'},
'doublelines': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline\hline',
'data_end': r'\hline\hline'},
'template': {'tabletype': 'tabletype', 'caption': 'caption',
'tablealign': 'tablealign',
'col_align': 'col_align', 'preamble': 'preamble',
'header_start': 'header_start',
'header_end': 'header_end', 'data_start': 'data_start',
'data_end': 'data_end', 'tablefoot': 'tablefoot',
'units': {'col1': 'unit of col1', 'col2': 'unit of col2'}}
}
RE_COMMENT = re.compile(r'(?<!\\)%') # % character but not \%
def add_dictval_to_list(adict, key, alist):
'''
Add a value from a dictionary to a list
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
'''
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
def find_latex_line(lines, latex):
'''
Find the first line which matches a pattern
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None, if no match was found
'''
re_string = re.compile(latex.replace('\\', '\\\\'))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
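# Illustrative sketch (editorial addition; hypothetical helper name): locate
# the ``\begin{tabular}`` line in a list of LaTeX source lines.
def _example_find_tabular():
    lines = [r'\begin{table}', r'\begin{tabular}{lr}', r'a & b \\', r'\end{tabular}']
    return find_latex_line(lines, r'\begin{tabular}')  # -> 1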
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
'''Split LaTeX table data. Default delimiter is `&`.
'''
delimiter = '&'
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r'\\'):
lines[-1] = last_line + r'\\'
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r'\\'):
line = line.rstrip(r'\\')
else:
raise core.InconsistentTableError(r'Lines in LaTeX table have to end with \\')
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == '{') and (val[-1] == '}'):
val = val[1:-1]
return val
def join(self, vals):
'''Join values together and add a few extra spaces for readability'''
delimiter = ' ' + self.delimiter + ' '
return delimiter.join(x.strip() for x in vals) + r' \\'
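# Illustrative sketch (editorial addition; hypothetical helper name): how a
# row of values is rendered as a single LaTeX table line by the splitter above.
def _example_latex_join():
    splitter = LatexSplitter()
    return splitter.join(['a', 'b ', ' c'])  # 'a & b & c \\'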
class LatexHeader(core.BaseHeader):
'''Class to read the header of Latex Tables'''
header_start = r'\begin{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format='latex_inline')
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
if self.latex['tabletype'] is not None:
lines.append(r'\begin{' + self.latex['tabletype'] + r'}' + align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\caption{' + self.latex['caption'] + '}')
lines.append(self.header_start + r'{' + self.latex['col_align'] + r'}')
add_dictval_to_list(self.latex, 'header_start', lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
lines.append(self.splitter.join([units.get(name, ' ') for name in self.colnames]))
add_dictval_to_list(self.latex, 'header_end', lines)
class LatexData(core.BaseData):
'''Class to read the data in LaTeX tables'''
data_start = None
data_end = r'\end{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r'Could not find table start')
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, 'data_start', lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, 'data_end', lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
if self.latex['tabletype'] is not None:
lines.append(r'\end{' + self.latex['tabletype'] + '}')
class Latex(core.BaseReader):
r'''LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
command; instead the focus is to generate syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
When reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
When writing a LaTeX table, some keywords can customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
``ascii.latex.latexdicts``. The following generates a table in the
style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
'''
_format_name = 'latex'
_io_registry_format_aliases = ['latex']
_io_registry_suffix = '.tex'
_description = 'LaTeX table'
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(self,
ignore_latex_commands=['hline', 'vspace', 'tableline',
'toprule', 'midrule', 'bottomrule'],
latexdict={}, caption='', col_align=None):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex['tabletype'] = 'table'
self.latex.update(latexdict)
if caption:
self.latex['caption'] = caption
if col_align:
self.latex['col_align'] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = '%|' + '|'.join(
[r'\\' + command for command in self.ignore_latex_commands])
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
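# Illustrative usage sketch (editorial addition, not part of astropy): writing
# a table with some of the ``latexdict`` options documented above. Assumes
# astropy is installed; the function name is hypothetical.
def _example_write_latex():
    import sys
    from astropy.table import Table
    from astropy.io import ascii

    t = Table({'name': ['bike', 'car'], 'mass': [75, 1200]})
    ascii.write(t, sys.stdout, format='latex',
                latexdict={'tabletype': 'table*',
                           'caption': r'Masses \label{tab:mass}',
                           'units': {'mass': 'kg'}})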
class AASTexHeaderSplitter(LatexSplitter):
r'''Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
'''
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead
"""
line = line.split('%')[0]
line = line.replace(r'\tablehead', '')
line = line.strip()
if (line[0] == '{') and (line[-1] == '}'):
line = line[1:-1]
else:
raise core.InconsistentTableError(r'\tablehead is missing {}')
return line.replace(r'\colhead', '')
def join(self, vals):
return ' & '.join([r'\colhead{' + str(x) + '}' for x in vals])
class AASTexHeader(LatexHeader):
r'''In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
'''
header_start = r'\tablehead'
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r'\tablehead')
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
lines.append(r'\begin{' + self.latex['tabletype'] + r'}{' + self.latex['col_align'] + r'}'
+ align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\tablecaption{' + self.latex['caption'] + '}')
tablehead = ' & '.join([r'\colhead{' + name + '}' for name in self.colnames])
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
tablehead += r'\\ ' + self.splitter.join([units.get(name, ' ')
for name in self.colnames])
lines.append(r'\tablehead{' + tablehead + '}')
class AASTexData(LatexData):
r'''In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`
'''
data_start = r'\startdata'
data_end = r'\enddata'
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
# Remove the extra space(s) and trailing \\ appended by the base writer,
# which would otherwise create an extra blank line at the end.
if len(lines) > lines_length_initial:
lines[-1] = re.sub(r'\s* \\ \\ \s* $', '', lines[-1],
flags=re.VERBOSE)
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
lines.append(r'\end{' + self.latex['tabletype'] + r'}')
class AASTex(Latex):
'''AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
'''
_format_name = 'aastex'
_io_registry_format_aliases = ['aastex']
_io_registry_suffix = '' # AASTex inherits from Latex, so override this class attr
_description = 'AASTeX deluxetable used for AAS journals'
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (('latexdict' in kwargs) and ('tabletype' in kwargs['latexdict'])):
self.latex['tabletype'] = 'deluxetable'
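# Illustrative usage sketch (editorial addition, not part of astropy): writing
# an AASTeX ``deluxetable``. ``caption`` and ``col_align`` are the shorthands
# accepted by the ``Latex``/``AASTex`` writers; the function name is hypothetical.
def _example_write_aastex():
    import sys
    from astropy.table import Table
    from astropy.io import ascii

    t = Table({'obj': ['A', 'B'], 'flux': [1.2, 3.4]})
    ascii.write(t, sys.stdout, format='aastex', caption='Fluxes', col_align='lr')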
|
7ab8f73fc14254c3f023929843569c86e6c6c023e3089b9691b0b0e0857cba1e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ipac.py:
Classes to read IPAC table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from collections import defaultdict, OrderedDict
from textwrap import wrap
from warnings import warn
from . import core
from . import fixedwidth
from . import basic
from astropy.utils.exceptions import AstropyUserWarning
from astropy.table.pprint import get_auto_format_func
class IpacFormatErrorDBMS(Exception):
def __str__(self):
return '{}\nSee {}'.format(
super().__str__(),
'https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html')
class IpacFormatError(Exception):
def __str__(self):
return '{}\nSee {}'.format(
super().__str__(),
'https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html')
class IpacHeaderSplitter(core.BaseSplitter):
'''Splitter for Ipac Headers.
This splitter is similar to its parent when reading, but supports a
fixed width format (as required for Ipac table headers) for writing.
'''
process_line = None
process_val = None
delimiter = '|'
delimiter_pad = ''
skipinitialspace = False
comment = r'\s*\\'
write_comment = r'\\'
col_starts = None
col_ends = None
def join(self, vals, widths):
pad = self.delimiter_pad or ''
delimiter = self.delimiter or ''
padded_delim = pad + delimiter + pad
bookend_left = delimiter + pad
bookend_right = pad + delimiter
vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
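# Illustrative sketch (editorial addition; hypothetical helper name): the
# header splitter right-justifies each value to its column width and bookends
# the row with the ``|`` delimiter.
def _example_ipac_header_join():
    splitter = IpacHeaderSplitter()
    return splitter.join(['ra', 'dec'], [10, 10])  # '|        ra|       dec|'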
class IpacHeader(fixedwidth.FixedWidthHeader):
"""IPAC table header"""
splitter_class = IpacHeaderSplitter
# Defined ordered list of possible types. Ordering is needed to
# distinguish between "d" (double) and "da" (date) as defined by
# the IPAC standard for abbreviations. This gets used in get_col_type().
col_type_list = (('integer', core.IntType),
('long', core.IntType),
('double', core.FloatType),
('float', core.FloatType),
('real', core.FloatType),
('char', core.StrType),
('date', core.StrType))
definition = 'ignore'
start_line = None
def process_lines(self, lines):
"""Generator to yield IPAC header lines, i.e. those starting and ending with
delimiter character (with trailing whitespace stripped)"""
delim = self.splitter.delimiter
for line in lines:
line = line.rstrip()
if line.startswith(delim) and line.endswith(delim):
yield line.strip(delim)
def update_meta(self, lines, meta):
"""
Extract table-level comments and keywords for IPAC table. See:
https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw
"""
def process_keyword_value(val):
"""
Take a string value and convert to float, int or str, and strip quotes
as needed.
"""
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
# Strip leading/trailing quote. The spec says that a matched pair
# of quotes is required, but this code will allow a non-quoted value.
for quote in ('"', "'"):
if val.startswith(quote) and val.endswith(quote):
val = val[1:-1]
break
return val
table_meta = meta['table']
table_meta['comments'] = []
table_meta['keywords'] = OrderedDict()
keywords = table_meta['keywords']
re_keyword = re.compile(r'\\'
r'(?P<name> \w+)'
r'\s* = (?P<value> .+) $',
re.VERBOSE)
for line in lines:
# Keywords and comments start with "\". Once the first non-slash
# line is seen then bail out.
if not line.startswith('\\'):
break
m = re_keyword.match(line)
if m:
name = m.group('name')
val = process_keyword_value(m.group('value'))
# IPAC allows for continuation keywords, e.g.
# \SQL = 'WHERE '
# \SQL = 'SELECT (25 column names follow in next row.)'
if name in keywords and isinstance(val, str):
prev_val = keywords[name]['value']
if isinstance(prev_val, str):
val = prev_val + val
keywords[name] = {'value': val}
else:
# Comment is required to start with "\ "
if line.startswith('\\ '):
val = line[2:].strip()
if val:
table_meta['comments'].append(val)
def get_col_type(self, col):
for (col_type_key, col_type) in self.col_type_list:
if col_type_key.startswith(col.raw_type.lower()):
return col_type
else:
raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
header_lines = self.process_lines(lines) # generator returning valid header lines
header_vals = [vals for vals in self.splitter(header_lines)]
if len(header_vals) == 0:
raise ValueError('At least one header line beginning and ending with '
'delimiter required')
elif len(header_vals) > 4:
raise ValueError('More than four header lines were found')
# Generate column definitions
cols = []
start = 1
for i, name in enumerate(header_vals[0]):
col = core.Column(name=name.strip(' -'))
col.start = start
col.end = start + len(name)
if len(header_vals) > 1:
col.raw_type = header_vals[1][i].strip(' -')
col.type = self.get_col_type(col)
if len(header_vals) > 2:
col.unit = header_vals[2][i].strip() or None # Can't strip dashes here
if len(header_vals) > 3:
# The IPAC null value corresponds to the io.ascii bad_value.
# In this case there isn't a fill_value defined, so just put
# in the minimal entry that is sure to convert properly to the
# required type.
#
# Strip spaces but not dashes (not allowed in NULL row per
# https://github.com/astropy/astropy/issues/361)
null = header_vals[3][i].strip()
fillval = '' if issubclass(col.type, core.StrType) else '0'
self.data.fill_values.append((null, fillval, col.name))
start = col.end + 1
cols.append(col)
# Correct column start/end based on definition
if self.ipac_definition == 'right':
col.start -= 1
elif self.ipac_definition == 'left':
col.end += 1
self.names = [x.name for x in cols]
self.cols = cols
def str_vals(self):
if self.DBMS:
IpacFormatE = IpacFormatErrorDBMS
else:
IpacFormatE = IpacFormatError
namelist = self.colnames
if self.DBMS:
countnamelist = defaultdict(int)
for name in self.colnames:
countnamelist[name.lower()] += 1
doublenames = [x for x in countnamelist if countnamelist[x] > 1]
if doublenames != []:
raise IpacFormatE('IPAC DBMS tables are not case sensitive. '
'This causes duplicate column names: {}'.format(doublenames))
for name in namelist:
m = re.match(r'\w+', name)
if m.end() != len(name):
raise IpacFormatE('{} - Only alphanumeric characters and _ '
'are allowed in column names.'.format(name))
if self.DBMS and not(name[0].isalpha() or (name[0] == '_')):
raise IpacFormatE(f'Column name cannot start with numbers: {name}')
if self.DBMS:
if name in ['x', 'y', 'z', 'X', 'Y', 'Z']:
raise IpacFormatE('{} - x, y, z, X, Y, Z are reserved names and '
'cannot be used as column names.'.format(name))
if len(name) > 16:
raise IpacFormatE(
f'{name} - Maximum length for column name is 16 characters')
else:
if len(name) > 40:
raise IpacFormatE(
f'{name} - Maximum length for column name is 40 characters.')
dtypelist = []
unitlist = []
nullist = []
for col in self.cols:
col_dtype = col.info.dtype
col_unit = col.info.unit
col_format = col.info.format
if col_dtype.kind in ['i', 'u']:
if col_dtype.itemsize <= 2:
dtypelist.append('int')
else:
dtypelist.append('long')
elif col_dtype.kind == 'f':
if col_dtype.itemsize <= 4:
dtypelist.append('float')
else:
dtypelist.append('double')
else:
dtypelist.append('char')
if col_unit is None:
unitlist.append('')
else:
unitlist.append(str(col.info.unit))
# This may be incompatible with mixin columns
null = col.fill_values[core.masked]
try:
auto_format_func = get_auto_format_func(col)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
nullist.append((format_func(col_format, null)).strip())
except Exception:
# It is possible that null and the column values have different
# data types (e.g. a number and null = 'null', i.e. a string).
# This could cause all kinds of exceptions, so a catch all
# block is needed here
nullist.append(str(null).strip())
return [namelist, dtypelist, unitlist, nullist]
def write(self, lines, widths):
'''Write header.
The width of each column is determined in Ipac.write. Writing the header
must be delayed until that time.
This function is called from there, once the width information is
available.'''
for vals in self.str_vals():
lines.append(self.splitter.join(vals, widths))
return lines
class IpacDataSplitter(fixedwidth.FixedWidthSplitter):
delimiter = ' '
delimiter_pad = ''
bookend = True
class IpacData(fixedwidth.FixedWidthData):
"""IPAC table data reader"""
comment = r'[|\\]'
start_line = 0
splitter_class = IpacDataSplitter
fill_values = [(core.masked, 'null')]
def write(self, lines, widths, vals_list):
""" IPAC writer, modified from FixedWidth writer """
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class Ipac(basic.Basic):
r"""IPAC format table.
See: https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html
Example::
\\name=value
\\ Comment
| column1 | column2 | column3 | column4 | column5 |
| double | double | int | double | char |
| unit | unit | unit | unit | unit |
| null | null | null | null | null |
2.0978 29.09056 73765 2.06000 B8IVpMnHg
Or::
|-----ra---|----dec---|---sao---|------v---|----sptype--------|
2.09708 29.09056 73765 2.06000 B8IVpMnHg
The comments and keywords defined in the header are available via the output
table ``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/ipac.dat')
>>> data = ascii.read(filename)
>>> print(data.meta['comments'])
['This is an example of a valid comment']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'])
...
intval 1
floatval 2300.0
date Wed Sp 20 09:48:36 1995
key_continue IPAC keywords can continue across lines
Note that there are different conventions for characters occurring below the
position of the ``|`` symbol in IPAC tables. By default, any character
below a ``|`` will be ignored (since this is the current standard),
but if you need to read files that assume characters below the ``|``
symbols belong to the column before or after the ``|``, you can specify
``definition='left'`` or ``definition='right'`` respectively when reading
the table (the default is ``definition='ignore'``). The following examples
demonstrate the different conventions:
* ``definition='ignore'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='left'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='right'``::
| ra | dec |
| float | float |
1.2345 6.7890
IPAC tables can specify a null value in the header that is shown in place
of missing or bad data. On writing, this value defaults to ``null``.
To specify a different null value, use the ``fill_values`` option to
replace masked values with a string or number of your choice as
described in :ref:`astropy:io_ascii_write_parameters`::
>>> from astropy.io.ascii import masked
>>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
>>> ascii.write(data, format='ipac', fill_values=fill)
\ This is an example of a valid comment
...
| ra| dec| sai| v2| sptype|
| double| double| long| double| char|
| unit| unit| unit| unit| ergs|
| N/A| null| null| null| -999|
N/A 29.09056 null 2.06 -999
2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
When writing a table with a column of integers, the data type is output
as ``int`` when the column ``dtype.itemsize`` is less than or equal to 2;
otherwise the data type is ``long``. For a column of floating-point values,
the data type is ``float`` when ``dtype.itemsize`` is less than or equal
to 4; otherwise the data type is ``double``.
Parameters
----------
definition : str, optional
Specify the convention for characters in the data table that occur
directly below the pipe (``|``) symbol in the header column definition:
* 'ignore' - Any character beneath a pipe symbol is ignored (default)
* 'right' - Character is associated with the column to the right
* 'left' - Character is associated with the column to the left
DBMS : bool, optional
If true, this verifies that written tables adhere (semantically)
to the `IPAC/DBMS
<https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
definition of IPAC tables. If 'False' it only checks for the (less strict)
`IPAC <https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
definition.
"""
_format_name = 'ipac'
_io_registry_format_aliases = ['ipac']
_io_registry_can_write = True
_description = 'IPAC format table'
data_class = IpacData
header_class = IpacHeader
def __init__(self, definition='ignore', DBMS=False):
super().__init__()
# Usually the header is not defined in __init__, but here it needs a keyword
if definition in ['ignore', 'left', 'right']:
self.header.ipac_definition = definition
else:
raise ValueError("definition should be one of ignore/left/right")
self.header.DBMS = DBMS
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Set a default null value for all columns by adding at the end, which
# is the position with the lowest priority.
# We have to do it this late, because the fill_value
# defined in the class can be overwritten by ui.write
self.data.fill_values.append((core.masked, 'null'))
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, self.guessing)
core._apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# Check that table has only 1-d columns.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
# Write header and data to lines list
lines = []
# Write meta information
if 'comments' in table.meta:
for comment in table.meta['comments']:
if len(str(comment)) > 78:
warn('Comment string > 78 characters was automatically wrapped.',
AstropyUserWarning)
for line in wrap(str(comment), 80, initial_indent='\\ ', subsequent_indent='\\ '):
lines.append(line)
if 'keywords' in table.meta:
keydict = table.meta['keywords']
for keyword in keydict:
try:
val = keydict[keyword]['value']
lines.append(f'\\{keyword.strip()}={val!r}')
# meta is not standardized: Catch some common Errors.
except TypeError:
warn("Table metadata keyword {0} has been skipped. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}".format(keyword),
AstropyUserWarning)
ignored_keys = [key for key in table.meta if key not in ('keywords', 'comments')]
if any(ignored_keys):
warn("Table metadata keyword(s) {0} were not written. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}".format(ignored_keys),
AstropyUserWarning
)
# Usually, this is done in data.write, but since the header is written
# first, we need that here.
self.data._set_fill_values(self.data.cols)
# get header and data as strings to find width of each column
for i, col in enumerate(table.columns.values()):
col.headwidth = max([len(vals[i]) for vals in self.header.str_vals()])
# keep data_str_vals because they take some time to make
data_str_vals = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
data_str_vals.append(vals)
for i, col in enumerate(table.columns.values()):
# FIXME: In Python 3.4, use max([], default=0).
# See: https://docs.python.org/3/library/functions.html#max
if data_str_vals:
col.width = max([len(vals[i]) for vals in data_str_vals])
else:
col.width = 0
widths = [max(col.width, col.headwidth) for col in table.columns.values()]
# then write table
self.header.write(lines, widths)
self.data.write(lines, widths, data_str_vals)
return lines
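# Illustrative usage sketch (editorial addition, not part of astropy): writing
# an IPAC table with comments and keywords supplied through ``table.meta`` in
# the form expected by the writer above. The function name is hypothetical.
def _example_write_ipac():
    import sys
    from astropy.table import Table
    from astropy.io import ascii

    t = Table({'ra': [2.09708], 'dec': [29.09056]})
    t.meta['comments'] = ['An example comment']
    t.meta['keywords'] = {'observer': {'value': 'me'}}
    ascii.write(t, sys.stdout, format='ipac')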
|
81fc22adb58f2a26869d42bdd2d3f95822ebfb1d76fbf9b28f70afd4b6a2b98c | # Licensed under a 3-clause BSD style license
import os
from setuptools import Extension
import numpy
ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
sources = [os.path.join(ROOT, 'cparser.pyx'),
os.path.join(ROOT, 'src', 'tokenizer.c')]
ascii_ext = Extension(
name="astropy.io.ascii.cparser",
include_dirs=[numpy.get_include()],
sources=sources)
return [ascii_ext]
|
35bae6d01df1e64718624bbe60de11adc1435d90bf37469d609be4490562c1cf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
import warnings
from . import core
from astropy.table import Column
from astropy.utils.xml import writer
from copy import deepcopy
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != 'table':
return False # Tag is not a <table>
elif 'table_id' not in htmldict:
return numtable == 1
table_id = htmldict['table_id']
if isinstance(table_id, str):
return 'id' in soup.attrs and soup['id'] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError('BeautifulSoup must be '
'installed to read HTML tables')
if 'parser' not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*')
soup = BeautifulSoup('\n'.join(lines))
else: # use a custom backend parser
soup = BeautifulSoup('\n'.join(lines), self.html['parser'])
tables = soup.find_all('table')
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html['table_id'], int):
err_descr = f"number {self.html['table_id']}"
else:
err_descr = f"id '{self.html['table_id']}'"
raise core.InconsistentTableError(
f'ERROR: HTML table {err_descr} not found')
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all('tr')]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
header_elements = soup.find_all('th')
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan')
else el.text.strip() for el in header_elements]
data_elements = soup.find_all('td')
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError('HTML tables must contain data '
'in a <table> tag')
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [core.convert_numpy(int),
core.convert_numpy(float),
core.convert_numpy(str)]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, 'colspan'):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num:col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(''))
new_names.append('')
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError('HTML tables cannot '
'have headings and data in the same row')
return i
raise core.InconsistentTableError('No start line found for HTML data')
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""HTML format table.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
https://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
"""
_format_name = 'html'
_io_registry_format_aliases = ['html']
_io_registry_suffix = '.html'
_description = 'HTML table'
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
max_ndim = 2 # HTML supports writing 2-d columns with shape (n, m)
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super().__init__()
self.html = deepcopy(htmldict)
if 'multicol' not in htmldict:
self.html['multicol'] = True
if 'table_id' not in htmldict:
self.html['table_id'] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return super().read(table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
# Check that table has only 1-d or 2-d columns. Above that fails.
self._check_multidim_table(table)
cols = list(table.columns.values())
self.data.header.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get('raw_html_cols', [])
if isinstance(raw_html_cols, str):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get('raw_html_clean_kwargs', {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag('html'):
with w.tag('head'):
# Declare encoding and set CSS style for table
with w.tag('meta', attrib={'charset': 'utf-8'}):
pass
with w.tag('meta', attrib={'http-equiv': 'Content-type',
'content': 'text/html;charset=UTF-8'}):
pass
if 'css' in self.html:
with w.tag('style'):
w.data(self.html['css'])
if 'cssfiles' in self.html:
for filename in self.html['cssfiles']:
with w.tag('link', rel="stylesheet", href=filename, type='text/css'):
pass
if 'jsfiles' in self.html:
for filename in self.html['jsfiles']:
with w.tag('script', src=filename):
w.data('') # need this instead of pass to get <script></script>
with w.tag('body'):
if 'js' in self.html:
with w.xml_cleaning_method('none'):
with w.tag('script'):
w.data(self.html['js'])
if isinstance(self.html['table_id'], str):
html_table_id = self.html['table_id']
else:
html_table_id = None
if 'table_class' in self.html:
html_table_class = self.html['table_class']
attrib = {"class": html_table_class}
else:
attrib = {}
with w.tag('table', id=html_table_id, attrib=attrib):
with w.tag('thead'):
with w.tag('tr'):
for col in cols:
if len(col.shape) > 1 and self.html['multicol']:
# Set colspan attribute for multicolumns
w.start('th', colspan=col.shape[1])
else:
w.start('th')
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
# Make a container to hold any new_col objects created
# below for multicolumn elements. This is purely to
# maintain a reference for these objects during
# subsequent iteration to format column values. This
# requires that the weakref info._parent be maintained.
new_cols = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html['multicol']:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(
col, new_col.info.iter_str_vals())
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
new_cols.append(new_col)
else:
col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals())
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag('tr'):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = ('escape_xml' if col_escaped else 'bleach_clean')
with w.xml_cleaning_method(method, **raw_html_clean_kwargs):
w.start('td')
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return [''.join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, 'mask')
has_fill_values = hasattr(col, 'fill_values')
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
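# Illustrative usage sketch (editorial addition, not part of astropy): writing
# a table as HTML with a few of the ``htmldict`` options documented above.
# ``raw_html_cols`` needs the optional ``bleach`` package for cleaning; the
# function name is hypothetical.
def _example_write_html():
    import sys
    from astropy.table import Table
    from astropy.io import ascii

    t = Table({'label': ['a', 'b'],
               'link': ['<a href="#x">x</a>', '<a href="#y">y</a>']})
    ascii.write(t, sys.stdout, format='html',
                htmldict={'table_id': 'results',
                          'table_class': 'dataframe',
                          'raw_html_cols': ['link']})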
|
656424e513aace8e64ed7b7c4891c34b059784f6f56b4e2e00c818cbff1b9854 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" sextractor.py:
Classes to read SExtractor table format
Built on daophot.py:
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class SExtractorHeader(core.BaseHeader):
"""Read the header from a file produced by SExtractor."""
comment = r'^\s*#\s*\S\D.*' # Find lines that don't have "# digit"
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a SExtractor
header. The SExtractor header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
"""
# This assumes that the columns are listed in order, one per line with a
# header comment string of the format: "# 1 ID short description [unit]"
# However, some may be missing and must be inferred from skipped column numbers
columns = {}
# E.g. '# 1 ID identification number' (no units) or '# 2 MAGERR magnitude of error [mag]'
# Updated along with issue #4603, for more robust parsing of unit
re_name_def = re.compile(r"""^\s* \# \s* # possible whitespace around #
(?P<colnumber> [0-9]+)\s+ # number of the column in table
(?P<colname> [-\w]+) # name of the column
# column description, match any character until...
(?:\s+(?P<coldescr> \w .+)
# ...until [non-space][space][unit] or [not-right-bracket][end]
(?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))?
(?:\s*\[(?P<colunit>.+)\])?.* # match units in brackets
""", re.VERBOSE)
dataline = None
for line in lines:
if not line.startswith('#'):
dataline = line # save for later to infer the actual number of columns
break # End of header lines
else:
match = re_name_def.search(line)
if match:
colnumber = int(match.group('colnumber'))
colname = match.group('colname')
coldescr = match.group('coldescr')
colunit = match.group('colunit') # If no units are given, colunit = None
columns[colnumber] = (colname, coldescr, colunit)
# Handle skipped column numbers
colnumbers = sorted(columns)
# Handle the case where the last column is array-like by appending a pseudo column
# If there are more data columns than the largest column number
# then add a pseudo-column that will be dropped later. This allows
# the array column logic below to work in all cases.
if dataline is not None:
n_data_cols = len(dataline.split())
else:
# handles no data, where we have to rely on the last column number
n_data_cols = colnumbers[-1]
# SExtractor column numbers start at 1.
columns[n_data_cols + 1] = (None, None, None)
colnumbers.append(n_data_cols + 1)
if len(columns) > 1:  # only fill in skipped columns when there is a genuine column initially
previous_column = 0
for n in colnumbers:
if n != previous_column + 1:
for c in range(previous_column + 1, n):
column_name = (columns[previous_column][0]
+ f"_{c - previous_column}")
column_descr = columns[previous_column][1]
column_unit = columns[previous_column][2]
columns[c] = (column_name, column_descr, column_unit)
previous_column = n
# Add the columns in order to self.names
colnumbers = sorted(columns)[:-1] # drop the pseudo column
self.names = []
for n in colnumbers:
self.names.append(columns[n][0])
if not self.names:
raise core.InconsistentTableError('No column names found in SExtractor header')
self.cols = []
for n in colnumbers:
col = core.Column(name=columns[n][0])
col.description = columns[n][1]
col.unit = columns[n][2]
self.cols.append(col)
class SExtractorData(core.BaseData):
start_line = 0
delimiter = ' '
comment = r'\s*#'
class SExtractor(core.BaseReader):
"""SExtractor format table.
SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts
1996, A&A Supp. 317, 393.)
See: https://sextractor.readthedocs.io/en/latest/
Example::
# 1 NUMBER
# 2 ALPHA_J2000
# 3 DELTA_J2000
# 4 FLUX_RADIUS
# 7 MAG_AUTO [mag]
# 8 X2_IMAGE Variance along x [pixel**2]
# 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)]
# 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498
2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401
Note the skipped numbers since flux_radius has 3 columns. The three
FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2
Also note that a post-ID description (e.g. "Variance along x") is optional
and that units may be specified at the end of a line in brackets.
"""
_format_name = 'sextractor'
_io_registry_can_write = False
_description = 'SExtractor format table'
header_class = SExtractorHeader
data_class = SExtractorData
inputter_class = core.ContinuationLinesInputter
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# remove the comments
if 'comments' in out.meta:
del out.meta['comments']
return out
def write(self, table):
raise NotImplementedError
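# Illustrative usage sketch (editorial addition, not part of astropy): reading
# a small SExtractor header/data block from a list of strings. Column 2 spans
# two data columns, so a FLUX_APER_1 column is inferred as described above.
# The function name is hypothetical.
def _example_read_sextractor():
    from astropy.io import ascii

    lines = ['# 1 NUMBER',
             '# 2 FLUX_APER Flux within aperture [count]',
             '1 2.5 3.5',
             '2 4.0 5.0']
    t = ascii.read(lines, format='sextractor')
    return t.colnames  # expected: ['NUMBER', 'FLUX_APER', 'FLUX_APER_1']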
|
fb6c80f651e0c39c7bd0e8a4b20ff90ea44733dd0218aa83fb8645f0873bade9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import re
import copy
from collections.abc import Iterable
import numpy as np
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.table import Table
from . import core, basic
def _line_type(line, delimiter=None):
"""Interpret a QDP file line
Parameters
----------
line : str
a single line of the file
Returns
-------
type : str
Line type: "comment", "command", or "data"
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
_command_re = r'READ [TS]ERR(\s+[0-9]+)+'
sep = delimiter
if delimiter is None:
sep = r'\s+'
_new_re = rf'NO({sep}NO)+'
_data_re = rf'({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)'
_type_re = rf'^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$'
_line_type_re = re.compile(_type_re)
line = line.strip()
if not line:
return 'comment'
match = _line_type_re.match(line)
if match is None:
raise ValueError(f'Unrecognized QDP line: {line}')
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == 'data':
return f'data,{len(val.split(sep=delimiter))}'
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
"""Read through the list of QDP file lines and label each line by type
Parameters
----------
lines : list
List containing one file line in each entry
Returns
-------
contents : list
List containing the type for each line (see ``_line_type``)
ncol : int
The number of columns in the data lines. Must be the same throughout
the file
Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
...
ValueError: Inconsistent number of columns
"""
types = [_line_type(line, delimiter=delimiter) for line in lines]
current_ncol = None
for type_ in types:
if type_.startswith('data'):
ncol = int(type_[5:])
if current_ncol is None:
current_ncol = ncol
elif ncol != current_ncol:
raise ValueError('Inconsistent number of columns')
return types, current_ncol
def _get_lines_from_file(qdp_file):
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
else:
        raise ValueError('invalid value of qdp_file')
return lines
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
"""Get all tables from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
input_colnames : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
delimiter : str
Delimiter for the values in the table.
Returns
-------
list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
lines = _get_lines_from_file(qdp_file)
contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
table_list = []
err_specs = {}
colnames = None
comment_text = ""
initial_comments = ""
command_lines = ""
current_rows = None
for line, datatype in zip(lines, contents):
line = line.strip().lstrip('!')
# Is this a comment?
if datatype == "comment":
comment_text += line + '\n'
continue
if datatype == "command":
# The first time I find commands, I save whatever comments into
            # the initial comments.
if command_lines == "":
initial_comments = comment_text
comment_text = ""
if err_specs != {}:
warnings.warn(
"This file contains multiple command blocks. Please verify",
AstropyUserWarning
)
command_lines += line + '\n'
continue
if datatype.startswith("data"):
# The first time I find data, I define err_specs
if err_specs == {} and command_lines != "":
for cline in command_lines.strip().split('\n'):
command = cline.strip().split()
# This should never happen, but just in case.
if len(command) < 3:
continue
err_specs[command[1].lower()] = [int(c) for c in
command[2:]]
if colnames is None:
colnames = _interpret_err_lines(
err_specs, ncol, names=input_colnames
)
if current_rows is None:
current_rows = []
values = []
for v in line.split(delimiter):
if v == "NO":
values.append(np.ma.masked)
else:
# Understand if number is int or float
try:
values.append(int(v))
except ValueError:
values.append(float(v))
current_rows.append(values)
continue
if datatype == "new":
# Save table to table_list and reset
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
# Reset comments
comment_text = ""
table_list.append(new_table)
current_rows = None
continue
# At the very end, if there is still a table being written, let's save
# it to the table_list
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
table_list.append(new_table)
return table_list
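# A minimal, hypothetical usage sketch (not used anywhere in this module):
# parsing a tiny two-table QDP snippet, passed as a newline-separated string,
# with the helper above. The snippet and the wrapper name are invented for
# illustration only.
def _demo_get_tables_from_string():
    example_qdp = (
        "READ SERR 2\n"
        "! table 0 comment\n"
        "1 2 0.1\n"
        "NO NO NO\n"
        "! table 1 comment\n"
        "3 4 0.2\n"
    )
    tables = _get_tables_from_qdp_file(example_qdp, input_colnames=["a", "b"])
    # READ SERR 2 turns data column 2 into a value + symmetric-error pair
    assert len(tables) == 2
    assert tables[0].colnames == ["a", "b", "b_err"]
    return tables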
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
    tbl : `~astropy.table.Table`
        The table whose position in the QDP file matches ``table_id``
"""
if table_id is None:
warnings.warn("table_id not specified. Reading the first available "
"table", AstropyUserWarning)
table_id = 0
tables = _get_tables_from_qdp_file(qdp_file, input_colnames=names, delimiter=delimiter)
return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other Parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
"""
import io
fobj = io.StringIO()
if 'initial_comments' in table.meta and table.meta['initial_comments'] != []:
for line in table.meta['initial_comments']:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if 'comments' in table.meta and table.meta['comments'] != []:
for line in table.meta['comments']:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, 'w') as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
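# A minimal, hypothetical usage sketch (not used anywhere in this module):
# writing a small in-memory table with one symmetric-error column through the
# helper above. The table literal and the wrapper name are invented for
# illustration only.
def _demo_write_table():
    t = Table({"a": [1.0, 2.0], "a_err": [0.1, 0.2], "b": [3, 4]})
    lines = _write_table_qdp(t, err_specs={"serr": [1]})
    # The error command comes first, then the commented column-name line
    assert lines[0] == "READ SERR 1"
    assert lines[1] == "!a a_err b"
    return lines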
class QDPSplitter(core.DefaultSplitter):
"""
Split on space for QDP tables
"""
delimiter = ' '
class QDPHeader(basic.CommentedHeaderHeader):
"""
    Header that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
comment = "!"
write_comment = "!"
class QDPData(basic.BasicData):
"""
    Data that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
fill_values = [(core.masked, 'NO')]
comment = "!"
write_comment = None
class QDP(basic.Basic):
"""Quick and Dandy Plot table.
Example::
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b be c d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b be c d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
The input table above contains some initial comments, the error commands,
then two tables.
This file format can contain multiple tables, separated by a line full
    of ``NO``s. Comment lines start with exclamation marks, and missing values are single
``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
The QDP format differentiates between data and error columns. The table
above has commands::
READ TERR 1
READ SERR 3
which mean that after data column 1 there will be two error columns
    containing its positive and negative error bars, then data column 2 without
error bars, then column 3, then a column with the symmetric error of column
3, then the remaining data columns.
As explained below, table headers are highly inconsistent. Possible
comments containing column names will be ignored and columns will be called
``col1``, ``col2``, etc. unless the user specifies their names with the
    ``names=`` keyword argument.
When passing column names, pass **only the names of the data columns, not
the error columns.**
Error information will be encoded in the names of the table columns.
(e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
column ``a``, ``b_err`` the symmetric error of column ``b``.)
When writing tables to this format, users can pass an ``err_specs`` keyword
passing a dictionary ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
    columns 1 and 2 will have two additional columns each with their positive
and negative errors, and data column 3 will have an additional column with
a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
    above).
Headers are just comments, and tables distributed by various missions
can differ greatly in their use of conventions. For example, light curves
distributed by the Swift-Gehrels mission have an extra space in one header
    entry that makes the number of labels inconsistent with the number of columns.
For this reason, we ignore the comments that might encode the column names
and leave the name specification to the user.
Example::
> Extra space
> |
> v
>! MJD Err (pos) Err(neg) Rate Error
>53000.123456 2.378e-05 -2.378472e-05 NO 0.212439
    These reader and writer classes will strive to understand which of the
comments belong to all the tables, and which ones to each single table.
General comments will be stored in the ``initial_comments`` meta of each
table. The comments of each table will be stored in the ``comments`` meta.
Example::
t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])
reads the second table (``table_id=1``) in file ``example.qdp`` containing
the table above. There are four column names but seven data columns, why?
Because the ``READ SERR`` and ``READ TERR`` commands say that there are
three error columns.
``t.meta['initial_comments']`` will contain the initial two comment lines
in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
The table can be written to another file, preserving the same information,
as::
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
Note how the ``terr`` and ``serr`` commands are passed to the writer.
"""
_format_name = 'qdp'
_io_registry_can_write = True
_io_registry_suffix = '.qdp'
_description = 'Quick and Dandy Plotter'
header_class = QDPHeader
data_class = QDPData
def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
super().__init__()
self.table_id = table_id
self.names = names
self.err_specs = err_specs
self.delimiter = sep
def read(self, table):
self.lines = self.inputter.get_lines(table, newline="\n")
return _read_table_qdp(self.lines, table_id=self.table_id,
names=self.names, delimiter=self.delimiter)
def write(self, table):
self._check_multidim_table(table)
lines = _write_table_qdp(table, err_specs=self.err_specs)
return lines
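# A minimal, hypothetical usage sketch (not used anywhere in this module):
# round-tripping the reader/writer defined above through the high-level Table
# interface, with the same keywords shown in the class docstring. The file
# names are placeholders.
def _demo_qdp_roundtrip(qdp_path, out_path):
    t = Table.read(qdp_path, format="ascii.qdp", table_id=0, names=["a", "b"])
    # With no err_specs the writer infers READ SERR/TERR from *_err/_perr/_nerr
    # column names (see _understand_err_col above).
    t.write(out_path, format="ascii.qdp", overwrite=True)
    return t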
|
05143af71f1c31d4d9c7c4c9f0db707811c48991d311d3e27c1b575a58aeedd1 | READ_DOCSTRING = """
Read the input ``table`` and return the table. Most of
the default behavior for various parameters is determined by the Reader
class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/read.html
Parameters
----------
table : str, file-like, list, `pathlib.Path` object
Input table as a file name, file-like object, list of string[s],
single newline-separated string or `pathlib.Path` object.
guess : bool
Try to guess the table format. Defaults to None.
format : str, `~astropy.io.ascii.BaseReader`
Input table format
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
        Dictionary of converters. Keys in the dictionary are column names,
values are converter functions. In addition to single column names
you can use wildcards via `fnmatch` to select multiple columns.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fill_values : tuple, list of tuple
specification of fill values for bad or missing table values
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``)
fast_reader : bool, str or dict
Whether to use the C engine, can also be a dict with options which
defaults to `False`; parameters for options dict:
use_fast_converter: bool
enable faster but slightly imprecise floating point conversion method
parallel: bool or int
multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes
exponent_style: str
One-character string defining the exponent or ``'Fortran'`` to auto-detect
Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``),
            all case-insensitive; default ``'E'``, all others imply ``use_fast_converter``
chunk_size : int
If supplied with a value > 0 then read the table in chunks of
approximately ``chunk_size`` bytes. Default is reading table in one pass.
chunk_generator : bool
If True and ``chunk_size > 0`` then return an iterator that returns a
table for each chunk. The default is to return a single stacked table
for all the chunks.
encoding : str
        Allows specifying the encoding used to read the file (default ``None``).
Returns
-------
dat : `~astropy.table.Table` or <generator>
Output table
"""
# Specify allowed types for core read() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
READ_KWARG_TYPES = {
# 'table'
'guess': bool,
# 'format'
# 'Reader'
# 'Inputter'
# 'Outputter'
'delimiter': str,
'comment': str,
'quotechar': str,
'header_start': int,
'data_start': (int, str), # CDS allows 'guess'
'data_end': int,
'converters': dict,
# 'data_Splitter'
# 'header_Splitter'
'names': 'list-like',
'include_names': 'list-like',
'exclude_names': 'list-like',
'fill_values': 'list-like',
'fill_include_names': 'list-like',
'fill_exclude_names': 'list-like',
'fast_reader': (bool, str, dict),
'encoding': str,
}
WRITE_DOCSTRING = """
Write the input ``table`` to ``filename``. Most of the default behavior
for various parameters is determined by the Writer class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/write.html
Parameters
----------
table : `~astropy.io.ascii.BaseReader`, array-like, str, file-like, list
Input table as a Reader object, Numpy struct array, file name,
file-like object, list of strings, or single newline-separated string.
output : str, file-like
        Output [filename, file-like object]. Defaults to ``sys.stdout``.
format : str
Output table format. Defaults to 'basic'.
delimiter : str
Column delimiter string
comment : str, bool
String defining a comment line in table. If `False` then comments
are not written out.
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool, str
Whether to use the fast Cython writer. Can be `True` (use fast writer
if available), `False` (do not use fast writer), or ``'force'`` (use
fast writer and fail if not available, mostly for testing).
overwrite : bool
If ``overwrite=False`` (default) and the file exists, then an OSError
is raised. This parameter is ignored when the ``output`` arg is not a
string (e.g., a file object).
"""
# Specify allowed types for core write() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
WRITE_KWARG_TYPES = {
# 'table'
# 'output'
'format': str,
'delimiter': str,
'comment': (str, bool),
'quotechar': str,
'header_start': int,
'formats': dict,
'strip_whitespace': (bool),
'names': 'list-like',
'include_names': 'list-like',
'exclude_names': 'list-like',
'fast_writer': (bool, str),
'overwrite': (bool),
}
|
306d62651ecb1ac1a74b0a19bac7938098484818e2d79834603d2024829fcfea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
import collections
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import mrt
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
from astropy.table import Table, MaskedColumn
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[:i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and os.path.exists(table):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(fr'< \s* {element} [^>]* >', table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
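# A minimal, hypothetical usage sketch (not used anywhere in this module):
# the kind of verdicts the heuristic above returns. The wrapper name is
# invented for illustration only.
def _demo_probably_html():
    assert _probably_html("<!DOCTYPE html><table><tr><td>1</td></tr></table>")
    assert not _probably_html("a b c\n1 2 3\n")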
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f'Cannot supply both format and {label} keywords')
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {!r} not in allowed list {}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
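# A minimal, hypothetical usage sketch (not used anywhere in this module):
# how the helper above normalizes the accepted forms of ``fast_reader``.
# Note that a dict without an 'enable' key is treated as 'force'.
def _demo_fast_reader_dict():
    assert _get_fast_reader_dict({}) == {'enable': True}
    assert _get_fast_reader_dict({'fast_reader': False}) == {'enable': False}
    assert (_get_fast_reader_dict({'fast_reader': {'chunk_size': 100}})
            == {'chunk_size': 100, 'enable': 'force'})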
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
        ``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
"""
if cls == 'list-like':
ok = (not isinstance(val, str)
and isinstance(val, collections.abc.Iterable))
else:
ok = isinstance(val, cls)
if not ok:
            # See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == 'read' else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
# be well-specified, e.g. ```__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead")
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
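# A minimal, hypothetical usage sketch (not used anywhere in this module):
# the duck-typed check above accepts a numpy integer where a plain ``int`` is
# expected, but rejects values that cannot be coerced. The wrapper name is
# invented for illustration only.
def _demo_validate_kwargs():
    # np.int16(2) "quacks" like an int, so this passes silently
    _validate_read_write_kwargs('read', header_start=np.int16(2))
    try:
        # a string is not a dict and dict('...') raises, so this fails
        _validate_read_write_kwargs('read', converters='not-a-dict')
    except TypeError:
        pass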
def read(table, guess=None, **kwargs):
    # This is the final output from reading. Static analysis indicates the reading
# logic (which is indeed complex) might not define `dat`, thus do so here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs('read', **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and f'fast_{format}' in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
f'fast reader {fast_reader_rdr.__class__} exception: {err}')
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError('read() function failed due to code logic error, '
'please report this bug on github')
return dat
read.__doc__ = core.READ_DOCSTRING
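# A minimal, hypothetical usage sketch (not used anywhere in this module):
# reading an inline table with guessing disabled and an explicit format, the
# combination recommended by the error message built in _guess() below.
def _demo_read_inline():
    dat = read("a b\n1 2\n3 4\n", format="basic", guess=False)
    assert dat.colnames == ["a", "b"]
    assert len(dat) == 2
    return dat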
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and f'fast_{format}' in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False
and guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: reader only available in fast version',
'dt': f'{0.0:.3f} ms'})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force'
and guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: no fast version of reader available',
'dt': f'{0.0:.3f} ms'})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_args has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict req'ts on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}'})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
sorted_keys = sorted([x for x in sorted(kwargs)
if x not in ('Reader', 'Outputter')])
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f'{key}: {val!r}' for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, mrt.Mrt, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
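# A minimal, hypothetical usage sketch (not used anywhere in this module):
# requesting chunked reading through the public read() entry point, which
# routes to _read_in_chunks above when ``fast_reader`` carries a
# ``chunk_size``. The tiny chunk size and the wrapper name are invented for
# illustration only.
def _demo_chunked_read():
    text = "a b\n" + "\n".join(f"{i} {i + 1}" for i in range(100)) + "\n"
    dat = read(text, format="basic", guess=False,
               fast_reader={"chunk_size": 200})
    assert len(dat) == 100
    return dat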
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
# Stick on the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=False, **kwargs):
# Docstring inserted below
_validate_read_write_kwargs('write', format=format, fast_writer=fast_writer,
overwrite=overwrite, **kwargs)
if isinstance(output, str):
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
# While we are only going to read data from columns, we may need to
# to adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
'The key(s) {} specified in the formats argument do not match a column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, 'write'):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, 'w', newline='')
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
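# A minimal, hypothetical usage sketch (not used anywhere in this module):
# writing a small table to an in-memory buffer with write() above. The table
# literal and the wrapper name are invented for illustration only.
def _demo_write_to_buffer():
    t = Table({"a": [1, 2], "b": [3.0, 4.0]})
    buf = StringIO()
    write(t, buf, format="csv")
    return buf.getvalue()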
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
|
061fe343b22ddbc5d18100e1595430beb513aeb22826ad81d5cdbfbe04ca461c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import copy
from collections import OrderedDict
from . import core
from astropy.table import Table
from . import cparser
from astropy.utils.misc import _set_locale
class FastBasic(metaclass=core.MetaBaseReader):
"""
This class is intended to handle the same format addressed by the
ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
code and is therefore much faster. Unlike the other ASCII readers and
writers, this class is not very extensible and is restricted
by optimization requirements.
"""
_format_name = 'fast_basic'
_description = 'Basic table with custom delimiter using the fast C engine'
_fast = True
fill_extra_cols = False
guessing = False
strict_names = False
def __init__(self, default_kwargs={}, **user_kwargs):
# Make sure user does not set header_start to None for a reader
# that expects a non-None value (i.e. a number >= 0). This mimics
# what happens in the Basic reader.
if (default_kwargs.get('header_start', 0) is not None
and user_kwargs.get('header_start', 0) is None):
raise ValueError('header_start cannot be set to None for this Reader')
# Set up kwargs and copy any user kwargs. Use deepcopy user kwargs
# since they may contain a dict item which would end up as a ref to the
# original and get munged later (e.g. in cparser.pyx validation of
# fast_reader dict).
kwargs = copy.deepcopy(default_kwargs)
kwargs.update(copy.deepcopy(user_kwargs))
delimiter = kwargs.pop('delimiter', ' ')
self.delimiter = str(delimiter) if delimiter is not None else None
self.write_comment = kwargs.get('comment', '# ')
self.comment = kwargs.pop('comment', '#')
if self.comment is not None:
self.comment = str(self.comment)
self.quotechar = str(kwargs.pop('quotechar', '"'))
self.header_start = kwargs.pop('header_start', 0)
# If data_start is not specified, start reading
# data right after the header line
data_start_default = user_kwargs.get('data_start', self.header_start
+ 1 if self.header_start is not None else 1)
self.data_start = kwargs.pop('data_start', data_start_default)
self.kwargs = kwargs
self.strip_whitespace_lines = True
self.strip_whitespace_fields = True
def _read_header(self):
# Use the tokenizer by default -- this method
# can be overridden for specialized headers
self.engine.read_header()
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
if self.comment is not None and len(self.comment) != 1:
raise core.ParameterError("The C reader does not support a comment regex")
elif self.data_start is None:
raise core.ParameterError("The C reader does not allow data_start to be None")
elif self.header_start is not None and self.header_start < 0 and \
not isinstance(self, FastCommentedHeader):
raise core.ParameterError("The C reader does not allow header_start to be "
"negative except for commented-header files")
elif self.data_start < 0:
raise core.ParameterError("The C reader does not allow data_start to be negative")
elif len(self.delimiter) != 1:
raise core.ParameterError("The C reader only supports 1-char delimiters")
elif len(self.quotechar) != 1:
raise core.ParameterError("The C reader only supports a length-1 quote character")
elif 'converters' in self.kwargs:
raise core.ParameterError("The C reader does not support passing "
"specialized converters")
elif 'encoding' in self.kwargs:
raise core.ParameterError("The C reader does not use the encoding parameter")
elif 'Outputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Outputter parameter")
elif 'Inputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Inputter parameter")
elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs:
raise core.ParameterError("The C reader does not use a Splitter class")
self.strict_names = self.kwargs.pop('strict_names', False)
# Process fast_reader kwarg, which may or may not exist (though ui.py will always
# pass this as a dict with at least 'enable' set).
fast_reader = self.kwargs.get('fast_reader', True)
if not isinstance(fast_reader, dict):
fast_reader = {}
fast_reader.pop('enable', None)
self.return_header_chars = fast_reader.pop('return_header_chars', False)
# Put fast_reader dict back into kwargs.
self.kwargs['fast_reader'] = fast_reader
self.engine = cparser.CParser(table, self.strip_whitespace_lines,
self.strip_whitespace_fields,
delimiter=self.delimiter,
header_start=self.header_start,
comment=self.comment,
quotechar=self.quotechar,
data_start=self.data_start,
fill_extra_cols=self.fill_extra_cols,
**self.kwargs)
conversion_info = self._read_header()
self.check_header()
if conversion_info is not None:
try_int, try_float, try_string = conversion_info
else:
try_int = {}
try_float = {}
try_string = {}
with _set_locale('C'):
data, comments = self.engine.read(try_int, try_float, try_string)
out = self.make_table(data, comments)
if self.return_header_chars:
out.meta['__ascii_fast_reader_header_chars__'] = self.engine.header_chars
return out
def make_table(self, data, comments):
"""Actually make the output table give the data and comments."""
meta = OrderedDict()
if comments:
meta['comments'] = comments
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def check_header(self):
names = self.engine.get_header_names() or self.engine.get_names()
if self.strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in names:
if (core._is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads):
raise ValueError('Column name {!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if self.guessing and len(names) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(names))
def write(self, table, output):
"""
Use a fast Cython method to write table data to output,
where output is a filename or file-like object.
"""
self._write(table, output, {})
def _write(self, table, output, default_kwargs,
header_output=True, output_types=False):
# Fast writer supports only 1-d columns
core._check_multidim_table(table, max_ndim=1)
write_kwargs = {'delimiter': self.delimiter,
'quotechar': self.quotechar,
'strip_whitespace': self.strip_whitespace_fields,
'comment': self.write_comment
}
write_kwargs.update(default_kwargs)
# user kwargs take precedence over default kwargs
write_kwargs.update(self.kwargs)
writer = cparser.FastWriter(table, **write_kwargs)
writer.write(output, header_output, output_types)
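# A minimal, hypothetical usage sketch (not used anywhere in this module):
# driving FastBasic directly rather than through ui.read(). The input must
# already satisfy the C-engine restrictions checked in read() above
# (single-character delimiter, non-negative header_start, ...). The wrapper
# name is invented for illustration only.
def _demo_fast_basic():
    reader = FastBasic(delimiter=" ", comment="#")
    tbl = reader.read("x y\n1 2\n3 4\n")
    assert tbl.colnames == ["x", "y"]
    return tbl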
class FastCsv(FastBasic):
"""
A faster version of the ordinary :class:`Csv` writer that uses the
optimized C parsing engine. Note that this reader will append empty
field values to the end of any row with not enough columns, while
:class:`FastBasic` simply raises an error.
"""
_format_name = 'fast_csv'
_description = 'Comma-separated values table using the fast C engine'
_fast = True
fill_extra_cols = True
def __init__(self, **kwargs):
super().__init__({'delimiter': ',', 'comment': None}, **kwargs)
def write(self, table, output):
"""
Override the default write method of `FastBasic` to
output masked values as empty fields.
"""
self._write(table, output, {'fill_values': [(core.masked, '')]})
class FastTab(FastBasic):
"""
A faster version of the ordinary :class:`Tab` reader that uses
the optimized C parsing engine.
"""
_format_name = 'fast_tab'
_description = 'Tab-separated values table using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t'}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
class FastNoHeader(FastBasic):
"""
This class uses the fast C engine to read tables with no header line. If
the names parameter is unspecified, the columns will be autonamed with
"col{}".
"""
_format_name = 'fast_no_header'
_description = 'Basic table with no headers using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'header_start': None, 'data_start': 0}, **kwargs)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that column names are not included in the output.
"""
self._write(table, output, {}, header_output=None)
class FastCommentedHeader(FastBasic):
"""
A faster version of the :class:`CommentedHeader` reader, which looks for
column names in a commented line. ``header_start`` denotes the index of
the header line among all commented lines and is 0 by default.
"""
_format_name = 'fast_commented_header'
_description = 'Column names in a commented line using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({}, **kwargs)
# Mimic CommentedHeader's behavior in which data_start
# is relative to header_start if unspecified; see #2692
if 'data_start' not in kwargs:
self.data_start = 0
def make_table(self, data, comments):
"""
Actually make the output table given the data and comments. This is
slightly different from the base FastBasic method in the way comments
are handled.
"""
meta = OrderedDict()
if comments:
idx = self.header_start
if idx < 0:
idx = len(comments) + idx
meta['comments'] = comments[:idx] + comments[idx+1:] # noqa
if not meta['comments']:
del meta['comments']
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def _read_header(self):
tmp = self.engine.source
commented_lines = []
for line in tmp.splitlines():
line = line.lstrip()
if line and line[0] == self.comment: # line begins with a comment
commented_lines.append(line[1:])
if len(commented_lines) == self.header_start + 1:
break
if len(commented_lines) <= self.header_start:
raise cparser.CParserError('not enough commented lines')
self.engine.setup_tokenizer([commented_lines[self.header_start]])
self.engine.header_start = 0
self.engine.read_header()
self.engine.setup_tokenizer(tmp)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that column names are commented.
"""
self._write(table, output, {}, header_output='comment')
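# Illustrative sketch (the input lines are made up): with this format the
# column names come from a commented line, and ``header_start`` counts only
# the commented lines, not all lines of the input.
from astropy.io import ascii

lines = ["# data taken on night 1",
         "# a b c",
         "1 2 3",
         "4 5 6"]
tbl = ascii.read(lines, format='fast_commented_header', header_start=1)
# -> columns 'a', 'b', 'c'; the first commented line ends up in
#    tbl.meta['comments'].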
class FastRdb(FastBasic):
"""
A faster version of the :class:`Rdb` reader. This format is similar to
tab-delimited, but it also contains a header line after the column
name line denoting the type of each column (N for numeric, S for string).
"""
_format_name = 'fast_rdb'
_description = 'Tab-separated with a type definition header line'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t', 'data_start': 2}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
def _read_header(self):
tmp = self.engine.source
line1 = ''
line2 = ''
for line in tmp.splitlines():
# valid non-comment line
if not line1 and line.strip() and line.lstrip()[0] != self.comment:
line1 = line
elif not line2 and line.strip() and line.lstrip()[0] != self.comment:
line2 = line
break
else: # less than 2 lines in table
raise ValueError('RDB header requires 2 lines')
# Tokenize the two header lines separately.
# Each call to self.engine.read_header by default
# - calls _deduplicate_names to ensure unique header_names
# - sets self.names from self.header_names if not provided as kwarg
# - applies self.include_names/exclude_names to self.names.
# For parsing the types, disable steps 1 and 3 above, but self.names still needs to be set.
self.engine.setup_tokenizer([line2])
self.engine.header_start = 0
self.engine.read_header(deduplicate=False, filter_names=False)
types = self.engine.get_header_names()
# If no kwarg names have been passed, reset to have column names read from header line 1.
if types == self.engine.get_names():
self.engine.set_names([])
self.engine.setup_tokenizer([line1])
# Get full list of column names prior to applying include/exclude_names,
# which have to be applied to the unique name set after deduplicate.
self.engine.read_header(deduplicate=True, filter_names=False)
col_names = self.engine.get_names()
self.engine.read_header(deduplicate=False)
if len(col_names) != len(types):
raise core.InconsistentTableError('RDB header mismatch between number of '
'column names and column types')
# If columns have been removed via include/exclude_names, extract matching types.
if len(self.engine.get_names()) != len(types):
types = [types[col_names.index(n)] for n in self.engine.get_names()]
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types):
raise core.InconsistentTableError('RDB type definitions do not all match '
'[num](N|S): {}'.format(types))
try_int = {}
try_float = {}
try_string = {}
for name, col_type in zip(self.engine.get_names(), types):
if col_type[-1].lower() == 's':
try_int[name] = 0
try_float[name] = 0
try_string[name] = 1
else:
try_int[name] = 1
try_float[name] = 1
try_string[name] = 0
self.engine.setup_tokenizer(tmp)
return (try_int, try_float, try_string)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` to
output a line with column types after the column name line.
"""
self._write(table, output, {}, output_types=True)
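# Illustrative sketch of the RDB layout handled above (the values are made
# up): the second header line gives one type per column, N for numeric and S
# for string, and drives the column conversion instead of value guessing.
from astropy.io import ascii

rdb_text = "name\tmag\nS\tN\nstar1\t12.3\nstar2\t14.1\n"
tbl = ascii.read(rdb_text, format='fast_rdb')
# -> 'name' is read as a string column, 'mag' as a float column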
|
36ec683cecdc7c8c3fba35414ca371e875d9df97ee3b592cb135619c6465efaf | """A Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach ([email protected])
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns True"""
if pred is None:
func = operator.itemgetter(1)
else:
func = lambda x: pred(x[1])
ii = next(filter(func, enumerate(iterable)), default) # either index-item pair or default
return ii[0] if ii else default
def first_false_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns False"""
if pred is None:
func = operator.not_
else:
func = lambda x: not pred(x)
return first_true_index(iterable, func, default)
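# Quick illustration of the two index helpers above (the values are arbitrary):
# first_true_index returns the position of the first element for which the
# predicate holds, first_false_index the first position where it fails.
assert first_true_index([0, 0, 3, 5], pred=lambda x: x > 2) == 2
assert first_false_index([1, 1, 0, 2]) == 2                      # first falsy element
assert first_true_index([0, 0], pred=lambda x: x > 2) is None    # no match -> default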
def sortmore(*args, **kw):
"""
Sort any number of lists according to optionally given item sorting
key function(s) and/or a global sorting key function.
Parameters
----------
One or more lists
Keywords
--------
globalkey : None
Revert to sorting by the ``key`` function(s) alone.
globalkey : callable
Sort by evaluated value for all items in the lists
(the call signature of this function needs to be such that it accepts an
argument tuple of items from each list).
e.g. ``globalkey = lambda *l: sum(l)`` will order all the lists by the
sum of the items from each list.
if key: None
sorting done by value of first input list
(in this case the objects in the first iterable need the comparison
methods __lt__ etc...)
if key: callable
sorting done by value of key(item) for items in first iterable
if key: tuple
sorting done by value of (key(item_0), ..., key(item_n)) for items in
the first n iterables (where n is the length of the key tuple)
i.e. the first callable is the primary sorting criterion, and the
rest act as tie-breakers.
Returns
-------
Sorted lists
Examples
--------
Capture sorting indices::
l = list('CharacterS')
In [1]: sortmore( l, range(len(l)) )
Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
[0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
In [2]: sortmore( l, range(len(l)), key=str.lower )
Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
[2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
"""
first = list(args[0])
if not len(first):
return args
globalkey = kw.get('globalkey')
key = kw.get('key')
if key is None:
if globalkey:
# if global sort function given and no local (secondary) key given, ==> no tiebreakers
key = lambda x: 0
else:
key = lambda x: x # if no global sort and no local sort keys given, sort by item values
if globalkey is None:
globalkey = lambda *x: 0
if not isinstance(globalkey, collections.abc.Callable):
raise ValueError('globalkey needs to be callable')
if isinstance(key, collections.abc.Callable):
k = lambda x: (globalkey(*x), key(x[0]))
elif isinstance(key, tuple):
key = (k if k else lambda x: 0 for k in key)
k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
else:
raise KeyError(
"kw arg 'key' should be None, callable, or a sequence of callables, not {}"
.format(type(key)))
res = sorted(list(zip(*args)), key=k)
if 'order' in kw:
if kw['order'].startswith(('descend', 'reverse')):
res = reversed(res)
return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
"""Extends the itertools.groupby functionality to arbitrary number of iterators."""
if not func:
func = lambda x: x
its = sortmore(*its, key=func)
nfunc = lambda x: func(x[0])
zipper = itertools.groupby(zip(*its), nfunc)
unzipper = ((key, zip(*groups)) for key, groups in zipper)
return unzipper
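# Illustrative sketch (made-up data): group several parallel lists by a key
# computed from the items of the first list, keeping the lists aligned. The
# ordering within each group follows the stable sort performed by sortmore.
names = ['b1', 'a2', 'a1', 'b2']
values = [10, 20, 30, 40]
for key, (grp_names, grp_values) in groupmore(lambda s: s[0], names, values):
    print(key, grp_names, grp_values)
# a ('a2', 'a1') (20, 30)
# b ('b1', 'b2') (10, 40)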
|
ba4c8817d364d7211a4f5240bf605571a7e404413f8c58553c8696aaba6db254 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An extensible ASCII table reader and writer.
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
import numpy as np
import itertools as itt
from collections import defaultdict, OrderedDict
from . import core
from . import fixedwidth
from .misc import first_true_index, first_false_index, groupmore
class DaophotHeader(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r'\s*#K'
# Regex for extracting the format strings
re_format = re.compile(r'%-?(\d+)\.?\d?[sdfg]')
re_header_keyword = re.compile(r'[#]K'
r'\s+ (?P<name> \w+)'
r'\s* = (?P<stuff> .+) $',
re.VERBOSE)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""
Parse a series of column definition lines like below. There may be several
such blocks in a single file (where continuation characters have already been
stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ('#N', '#U', '#F')
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(' \\')
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(zip(*map(coldef_dict.get, line_ids)))[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype('U2'):
# extra column names eg. RAPERT2, SUM2 etc...
extended_names = list(map(''.join, zip(last_names, itt.repeat(i))))
if i == '1': # Enumerate the names starting at 1
coldef_dict['#N'][-1] = extended_names
else:
coldef_dict['#N'].append(extended_names)
coldef_dict['#U'].append(last_units)
coldef_dict['#F'].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [[get_col_width(f) for f in formats]
for formats in coldef_dict['#F']]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
coldef_dict = dict((k, sum(v, [])) for (k, v) in coldef_dict.items())
return coldef_dict
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta['table']
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
# Group the header lines according to their line identifiers (#K,
# #N, #U, #F or just # as a spacer line).
# Function that grabs the line identifier from a line.
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if '#K' in grouped_lines_dict:
keywords = OrderedDict(map(self.extract_keyword_line, grouped_lines_dict['#K']))
table_meta['keywords'] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ('#N', '#U', '#F')
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta['cols'][name] = {'unit': unit,
'format': fmt}
self.meta = meta
self.names = coldef_dict['#N']
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K)
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group('stuff').strip().rsplit(None, 2)
keyword_dict = {'units': vals[-2],
'format': vals[-1],
'value': (vals[0] if len(vals) > 2 else "")}
return m.group('name'), keyword_dict
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
-------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError('No column names found in DAOphot header')
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta['cols']
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ('unit', 'format'))
if unit != '##':
col.unit = unit
if fmt != '##':
col.format = fmt
# Set column start and end positions.
col_width = sum(self.col_widths, [])
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, 'format'):
if any(x in col.format for x in 'fg'):
col.type = core.FloatType
elif 'd' in col.format:
col.type = core.IntType
elif 's' in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(('INDEF', '0'))
class DaophotData(core.BaseData):
splitter_class = fixedwidth.FixedWidthSplitter
start_line = 0
comment = r'\s*#'
def __init__(self):
core.BaseData.__init__(self)
self.is_multiline = False
def get_data_lines(self, lines):
# Special case for multiline daophot databases. Extract the aperture
# values from the first multiline data block
if self.is_multiline:
# Grab the first column of the special block (aperture values) and
# recreate the aperture description string
aplist = next(zip(*map(str.split, self.first_block)))
self.header.aperture_values = tuple(map(float, aplist))
# Set self.data.data_lines to a slice of lines containing the data rows
core.BaseData.get_data_lines(self, lines)
class DaophotInputter(core.ContinuationLinesInputter):
continuation_char = '\\'
multiline_char = '*'
replace_char = ' '
re_multiline = re.compile(r'(#?)[^\\*#]*(\*?)(\\*) ?$')
def search_multiline(self, lines, depth=150):
"""
Search lines for special continuation character to determine number of
continued rows in a datablock. For efficiency, depth gives the upper
limit of lines to search.
"""
# The list of apertures given in the #K APERTURES keyword may not be
# complete!! This happens if the string description of the aperture
# list is longer than the field width of the #K APERTURES field. In
# this case we have to figure out how many apertures there are based on
# the file structure.
comment, special, cont = zip(*(self.re_multiline.search(line).groups()
for line in lines[:depth]))
# Find first non-comment line
data_start = first_false_index(comment)
# No data in lines[:depth]. This may be because there is no data in
# the file, or because the header is really huge. If the latter,
# increasing the search depth should help
if data_start is None:
return None, None, lines[:depth]
header_lines = lines[:data_start]
# Find first line ending on special row continuation character '*'
# indexed relative to data_start
first_special = first_true_index(special[data_start:depth])
if first_special is None: # no special lines
return None, None, header_lines
# last line ending on special '*', but not on line continuation '\\'
last_special = first_false_index(special[data_start + first_special:depth])
# index relative to first_special
# if first_special is None: #no end of special lines within search
# depth! increase search depth return self.search_multiline( lines,
# depth=2*depth )
# indexing now relative to line[0]
markers = np.cumsum([data_start, first_special, last_special])
# multiline portion of first data block
multiline_block = lines[markers[1]:markers[-1]]
return markers, multiline_block, header_lines
def process_lines(self, lines):
markers, block, header = self.search_multiline(lines)
self.data.is_multiline = markers is not None
self.data.markers = markers
self.data.first_block = block
# set the header lines returned by the search as an attribute of the header
self.data.header.lines = header
if markers is not None:
lines = lines[markers[0]:]
continuation_char = self.continuation_char
multiline_char = self.multiline_char
replace_char = self.replace_char
parts = []
outlines = []
for i, line in enumerate(lines):
mo = self.re_multiline.search(line)
if mo:
comment, special, cont = mo.groups()
if comment or cont:
line = line.replace(continuation_char, replace_char)
if special:
line = line.replace(multiline_char, replace_char)
if cont and not comment:
parts.append(line)
if not cont:
parts.append(line)
outlines.append(''.join(parts))
parts = []
else:
raise core.InconsistentTableError('multiline re could not match line '
'{}: {}'.format(i, line))
return outlines
class Daophot(core.BaseReader):
"""
DAOphot format table.
Example::
#K MERGERAD = INDEF scaleunit %-23.7g
#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
#K USER = davis name %-23s
#K HOST = tucana computer %-23s
#
#N ID XCENTER YCENTER MAG MERR MSKY NITER \\
#U ## pixels pixels magnitudes magnitudes counts ## \\
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
#
#N SHARPNESS CHI PIER PERROR \\
#U ## ## ## perrors \\
#F %-23.3f %-12.3f %-6d %-13s
#
14 138.538 INDEF 15.461 0.003 34.85955 4 \\
-0.032 0.802 0 No_error
The keywords defined in the #K records are available via the output table
``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/daophot.dat')
>>> data = ascii.read(filename)
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'], keyword['units'], keyword['format'])
...
MERGERAD INDEF scaleunit %-23.7g
IRAF NOAO/IRAFV2.10EXPORT version %-23s
USER name %-23s
...
The unit and formats are available in the output table columns::
>>> for colname in data.colnames:
... col = data[colname]
... print(colname, col.unit, col.format)
...
ID None %-9d
XCENTER pixels %-10.3f
YCENTER pixels %-10.3f
...
Any column values of INDEF are interpreted as a missing value and will be
masked out in the resultant table.
In case of multi-aperture daophot files containing repeated entries for the last
row of fields, extra unique column names will be created by suffixing
corresponding field names with numbers starting from 2 to N (where N is the
total number of apertures).
For example,
first aperture radius will be RAPERT and corresponding magnitude will be MAG,
second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2,
third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3,
and so on.
"""
_format_name = 'daophot'
_io_registry_format_aliases = ['daophot']
_io_registry_can_write = False
_description = 'IRAF DAOphot format table'
header_class = DaophotHeader
data_class = DaophotData
inputter_class = DaophotInputter
table_width = 80
def __init__(self):
core.BaseReader.__init__(self)
# The inputter needs to know about the data (see DaophotInputter.process_lines)
self.inputter.data = self.data
def write(self, table=None):
raise NotImplementedError
|
7402d79396e23e87325fb4336e9a4fe9ce74f2fa461057a16070d34ed2911fae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) format, which allows for
reading and writing all the meta data associated with an astropy Table object.
"""
import re
from collections import OrderedDict
import warnings
import json
import numpy as np
from . import core, basic
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyUserWarning
from astropy.io.ascii.core import convert_numpy
ECSV_VERSION = '1.0'
DELIMITERS = (' ', ',')
ECSV_DATATYPES = (
'bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float16', 'float32', 'float64',
'float128', 'string')
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace."""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end():]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with a delimiter separated list of the column names
in order to make this format readable by humans and simple csv-type
readers. It then encodes the full table meta and column attributes and
meta as YAML and pretty-prints this in the header. Finally the
delimited column names are repeated again, for humans and readers that
look for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
# Now assemble the header dict that will be serialized by the YAML dumper
header = {'cols': self.cols, 'schema': 'astropy-2.0'}
if self.table_meta:
header['meta'] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != ' ':
header['delimiter'] = self.splitter.delimiter
header_yaml_lines = ([f'%ECSV {ECSV_VERSION}',
'---']
+ meta.get_yaml_from_header(header))
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is an ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
no_header_msg = ('ECSV header line like "# %ECSV <version>" not found as first line.'
' This is required for an ECSV file.')
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
# Construct ecsv_version for backwards compatibility workarounds.
self.ecsv_version = tuple(int(v or 0) for v in match.groups())
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError:
raise core.InconsistentTableError('unable to parse yaml in meta header')
if 'meta' in header:
self.table_meta = header['meta']
if 'delimiter' in header:
delimiter = header['delimiter']
if delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x['name'], x) for x in header['datatype'])
self.names = [x['name'] for x in header['datatype']]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError('column names from ECSV header {} do not '
'match names from header line of CSV data {}'
.format(self.names, header_names))
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ('description', 'format', 'unit', 'meta', 'subtype'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]['datatype']
# Require col dtype to be a valid ECSV datatype. However, older versions
# of astropy writing ECSV version 0.9 and earlier had inadvertently allowed
# numpy datatypes like datetime64 or object or python str, which are not in the ECSV standard.
# For back-compatibility with those existing older files, allow reading with no error.
if col.dtype not in ECSV_DATATYPES and self.ecsv_version > (0, 9, 0):
raise ValueError(f'datatype {col.dtype!r} of column {col.name!r} '
f'is not in allowed values {ECSV_DATATYPES}')
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and '[' in subtype:
idx = subtype.index('[')
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ('dtype', 'subtype'):
if getattr(col, attr) == 'string':
setattr(col, attr, 'str')
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == 'json':
col.subtype = 'object'
def _check_dtype_is_str(col):
if col.dtype != 'str':
raise ValueError(f'datatype of column {col.name!r} must be "string"')
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == 'object':
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, 'mask'):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = '[]'
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = (data == None) # noqa: E711
kind = np.dtype(col.subtype).kind
data[mask] = {'U': '', 'S': b''}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
# np.array(col_vals_arr, dtype=object) fails ?? so this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, 'mask'):
all_none_arr = np.full(shape=col.shape, fill_value=None, dtype=object)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = (data == None) # noqa: E711
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {'U': '', 'S': b''}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(f'unexpected subtype {col.subtype!r} set for column '
f'{col.name!r}, using dtype={col.dtype!r} instead.',
category=AstropyUserWarning)
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError('shape mismatch between value and column specifier')
except json.JSONDecodeError:
raise ValueError(f'column {col.name!r} failed to convert: '
'column value is not valid JSON')
except Exception as exc:
raise ValueError(f'column {col.name!r} failed to convert: {exc}')
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""READ: Set the fill values of the individual cols based on fill_values of BaseData
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
Normally the super() method will set col.fill_value=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta['__serialized_columns__']
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (col.dtype == 'str' and col.name in scs
and scs[col.name]['__class__'] == 'astropy.table.column.MaskedColumn'):
col.fill_values = {} # No data value replacement
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This version considerably simplifies the base method:
- No need to set fill values and column formats
- No per-item formatting, just use repr()
- Use JSON for object-type or multidim values
- Only Column or MaskedColumn can end up as cols here.
- Only replace masked values with "", not the generalized filling
"""
for col in self.cols:
if len(col.shape) > 1 or col.info.dtype.kind == 'O':
def format_col_item(idx):
obj = col[idx]
try:
obj = obj.tolist()
except AttributeError:
pass
return json.dumps(obj, separators=(',', ':'))
else:
def format_col_item(idx):
return str(col[idx])
try:
col.str_vals = [format_col_item(idx) for idx in range(len(col))]
except TypeError as exc:
raise TypeError(f'could not convert column {col.info.name!r}'
f' to string: {exc}') from exc
# Replace every masked value in a 1-d column with an empty string.
# For multi-dim columns this gets done by JSON via "null".
if hasattr(col, 'mask') and col.ndim == 1:
for idx in col.mask.nonzero()[0]:
col.str_vals[idx] = ""
out = [col.str_vals for col in self.cols]
return out
class Ecsv(basic.Basic):
"""ECSV (Enhanced Character Separated Values) format table.
The ECSV format allows for specification of key table and column meta-data, in
particular the data type and unit.
See: https://github.com/astropy/astropy-APEs/blob/main/APE6.rst
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
"""
_format_name = 'ecsv'
_description = 'Enhanced CSV'
_io_registry_suffix = '.ecsv'
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
max_ndim = None # No limit on column dimensionality
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as('ecsv'):
out = serialize.represent_mixins_as_columns(table)
return out
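# Illustrative round-trip sketch (in-memory only, no real file): multi-
# dimensional or object columns are JSON-encoded per row by EcsvData.str_vals
# above and decoded again by EcsvOutputter._convert_vals on read.
import io
import numpy as np
from astropy.table import Table

t = Table({'a': np.arange(6).reshape(3, 2)})      # column with shape (3, 2)
buf = io.StringIO()
t.write(buf, format='ascii.ecsv')
t2 = Table.read(buf.getvalue(), format='ascii.ecsv')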
|
0d3fd214c27a07d48bc728642f99b76e39e7732589b3432fd6d7a9bb01cabfb2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects any readers/writers defined in io.misc to the
# astropy.table.Table class
from . import hdf5
from . import parquet
hdf5.register_hdf5()
parquet.register_parquet()
|
81ac234d2d497c6cbc78285a254156ca16f0b6133fede4631f73300004620402 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'
__all__ = ['read_table_hdf5', 'write_table_hdf5']
def meta_path(path):
return path + '.' + META_KEY
def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
if fileobj is not None:
loc = fileobj.tell()
try:
signature = fileobj.read(8)
finally:
fileobj.seek(loc)
return signature == HDF5_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.hdf5', '.h5'))
try:
import h5py
except ImportError:
return False
else:
return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None, character_as_bytes=True):
"""
Read a Table object from an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
input : str or :class:`h5py.File` or :class:`h5py.Group` or
:class:`h5py.Dataset`
If a string, the filename to read the table from.
If an h5py object, either the file or the group object to read the
table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode.
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
# This function is recursive, and only gets to actually reading the table
# once the input has been narrowed down to an hdf5 Dataset. Moreover, the
# input variable is reassigned along the way.
# Here, we save its value to be used at the end when the conditions are
# right.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError(f"Path {path} does not exist")
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
if len(arrays) == 0:
raise ValueError(f"no table found in HDF5 group {path}")
elif len(arrays) > 0:
path = arrays[0] if path is None else path + '/' + arrays[0]
if len(arrays) > 1:
warnings.warn("path= was not specified but multiple tables"
" are present, reading in first available"
" table (path={})".format(path),
AstropyUserWarning)
return read_table_hdf5(input, path=path)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, 'read'):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, 'r')
try:
return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from astropy.table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
# In the new format, instead, metadata are stored in a new dataset in the
# same file. This is introduced in Astropy 3.0
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input_save[meta_path(path)])
else:
# Must be old_version_meta is True. if (A or B) and not A then B is True
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input.attrs[META_KEY])
if 'meta' in list(header.keys()):
table.meta = header['meta']
header_cols = dict((x['name'], x) for x in header['datatype'])
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
if not character_as_bytes:
table.convert_bytestring_to_unicode()
return table
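# Minimal usage sketch (the file name and dataset path are hypothetical); this
# reader is normally reached through the unified Table I/O interface that
# register_hdf5() at the end of this module sets up.
from astropy.table import Table

tbl = Table.read('observations.hdf5', path='data/table1')     # via registry
tbl = read_table_hdf5('observations.hdf5', path='data/table1',
                      character_as_bytes=False)               # direct call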
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.table import serialize
from astropy import units as u
from astropy.utils.data_info import serialize_context_as
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as('hdf5'):
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
**create_dataset_kwargs):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
# '__astropy_table__' is just an arbitrary, hardcoded default path string.
path = '__astropy_table__'
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if len(list(output.keys())) > 0 and name == '__astropy_table__':
raise ValueError("table path should always be set via the "
"path= argument when writing to existing "
"files")
elif name == '__astropy_table__':
warnings.warn("table path was not set via the path= argument; "
"using default path {}".format(path))
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
if serialize_meta and name + '.__table_column_meta__' in output_group:
del output_group[name + '.__table_column_meta__']
else:
raise OSError(f"Table {path} already exists")
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# A table with numpy unicode strings can't be written to HDF5, so to write
# such a table a copy is made with the string columns converted to
# bytestrings; this copy of the table can then be written to HDF5.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression,
**create_dataset_kwargs)
else:
dset = output_group.create_dataset(name, data=table.as_array(),
**create_dataset_kwargs)
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = np.array([h.encode('utf-8') for h in header_yaml])
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{}` of type {} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
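# Minimal write sketch (the file name is hypothetical): serialize_meta=True
# preserves units, formats and other column info by writing a sidecar
# metadata dataset alongside the table.
from astropy.table import Table

t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
write_table_hdf5(t, 'observations.hdf5', path='data/table1',
                 serialize_meta=True, overwrite=True)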
def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader('hdf5', Table, read_table_hdf5)
io_registry.register_writer('hdf5', Table, write_table_hdf5)
io_registry.register_identifier('hdf5', Table, is_hdf5)
|
b8f41234d7ae7c7bc24e39c66eef09a97381fe89d6f73102d6345db0273ff5bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains miscellaneous utility functions for data
input/output with astropy.
"""
from .pickle_helpers import *
|
3b8fada64dd44ee59b88cdb016b75f9330f58d454c635dde405bfec9327befa9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for serializing core astropy objects via the
YAML protocol.
It provides functions `~astropy.io.misc.yaml.dump`,
`~astropy.io.misc.yaml.load`, and `~astropy.io.misc.yaml.load_all` which
call the corresponding functions in `PyYaml <https://pyyaml.org>`_ but use the
`~astropy.io.misc.yaml.AstropyDumper` and `~astropy.io.misc.yaml.AstropyLoader`
classes to define custom YAML tags for the following astropy classes:
- `astropy.units.Unit`
- `astropy.units.Quantity`
- `astropy.time.Time`
- `astropy.time.TimeDelta`
- `astropy.coordinates.SkyCoord`
- `astropy.coordinates.Angle`
- `astropy.coordinates.Latitude`
- `astropy.coordinates.Longitude`
- `astropy.coordinates.EarthLocation`
- `astropy.table.SerializedColumn`
Example
=======
::
>>> from astropy.io.misc import yaml
>>> import astropy.units as u
>>> from astropy.time import Time
>>> from astropy.coordinates import EarthLocation
>>> t = Time(2457389.0, format='mjd',
... location=EarthLocation(1000, 2000, 3000, unit=u.km))
>>> td = yaml.dump(t)
>>> print(td)
!astropy.time.Time
format: mjd
in_subfmt: '*'
jd1: 4857390.0
jd2: -0.5
location: !astropy.coordinates.earth.EarthLocation
ellipsoid: WGS84
x: !astropy.units.Quantity
unit: &id001 !astropy.units.Unit {unit: km}
value: 1000.0
y: !astropy.units.Quantity
unit: *id001
value: 2000.0
z: !astropy.units.Quantity
unit: *id001
value: 3000.0
out_subfmt: '*'
precision: 3
scale: utc
>>> ty = yaml.load(td)
>>> ty
<Time object: scale='utc' format='mjd' value=2457389.0>
>>> ty.location # doctest: +FLOAT_CMP
<EarthLocation (1000., 2000., 3000.) km>
"""
import base64
import numpy as np
import yaml
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy import coordinates as coords
from astropy.table import SerializedColumn
__all__ = ['AstropyLoader', 'AstropyDumper', 'load', 'load_all', 'dump']
def _unit_representer(dumper, obj):
out = {'unit': str(obj.to_string())}
return dumper.represent_mapping('!astropy.units.Unit', out)
def _unit_constructor(loader, node):
map = loader.construct_mapping(node)
return u.Unit(map['unit'], parse_strict='warn')
def _serialized_column_representer(dumper, obj):
out = dumper.represent_mapping('!astropy.table.SerializedColumn', obj)
return out
def _serialized_column_constructor(loader, node):
map = loader.construct_mapping(node)
return SerializedColumn(map)
def _time_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping('!astropy.time.Time', out)
def _time_constructor(loader, node):
map = loader.construct_mapping(node)
out = Time.info._construct_from_dict(map)
return out
def _timedelta_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping('!astropy.time.TimeDelta', out)
def _timedelta_constructor(loader, node):
map = loader.construct_mapping(node)
out = TimeDelta.info._construct_from_dict(map)
return out
def _ndarray_representer(dumper, obj):
if not (obj.flags['C_CONTIGUOUS'] or obj.flags['F_CONTIGUOUS']):
obj = np.ascontiguousarray(obj)
if np.isfortran(obj):
obj = obj.T
order = 'F'
else:
order = 'C'
data_b64 = base64.b64encode(obj.tobytes())
out = dict(buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
shape=obj.shape,
order=order)
return dumper.represent_mapping('!numpy.ndarray', out)
def _ndarray_constructor(loader, node):
# Convert mapping to a dict useful for initializing ndarray.
# Need deep=True since for structured dtype, the contents
# include lists and tuples, which need recursion via
# construct_sequence.
map = loader.construct_mapping(node, deep=True)
map['buffer'] = base64.b64decode(map['buffer'])
return np.ndarray(**map)
def _void_representer(dumper, obj):
data_b64 = base64.b64encode(obj.tobytes())
out = dict(buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr)
return dumper.represent_mapping('!numpy.void', out)
def _void_constructor(loader, node):
# Interpret the node as an array scalar and then index to change to void.
map = loader.construct_mapping(node, deep=True)
map['buffer'] = base64.b64decode(map['buffer'])
return np.ndarray(shape=(), **map)[()]
def _quantity_representer(tag):
def representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping(tag, out)
return representer
def _quantity_constructor(cls):
def constructor(loader, node):
map = loader.construct_mapping(node)
return cls.info._construct_from_dict(map)
return constructor
def _skycoord_representer(dumper, obj):
map = obj.info._represent_as_dict()
out = dumper.represent_mapping('!astropy.coordinates.sky_coordinate.SkyCoord',
map)
return out
def _skycoord_constructor(loader, node):
map = loader.construct_mapping(node)
out = coords.SkyCoord.info._construct_from_dict(map)
return out
# Straight from yaml's Representer
def _complex_representer(self, data):
if data.imag == 0.0:
data = f'{data.real!r}'
elif data.real == 0.0:
data = f'{data.imag!r}j'
elif data.imag > 0:
data = f'{data.real!r}+{data.imag!r}j'
else:
data = f'{data.real!r}{data.imag!r}j'
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
def _complex_constructor(loader, node):
map = loader.construct_scalar(node)
return complex(map)
class AstropyLoader(yaml.SafeLoader):
"""
Custom SafeLoader that constructs astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available constructor functions that are
called when parsing a YAML stream. See the `PyYaml documentation
<https://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the
class signature.
"""
def _construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def _construct_python_unicode(self, node):
return self.construct_scalar(node)
class AstropyDumper(yaml.SafeDumper):
"""
Custom SafeDumper that represents astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available representer functions that are
called when generating a YAML stream from an object. See the
`PyYaml documentation <https://pyyaml.org/wiki/PyYAMLDocumentation>`_
for details of the class signature.
"""
def _represent_tuple(self, data):
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
AstropyDumper.add_multi_representer(u.UnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.FunctionUnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.StructuredUnit, _unit_representer)
AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple)
AstropyDumper.add_representer(np.ndarray, _ndarray_representer)
AstropyDumper.add_representer(np.void, _void_representer)
AstropyDumper.add_representer(Time, _time_representer)
AstropyDumper.add_representer(TimeDelta, _timedelta_representer)
AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer)
AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer)
# Numpy dtypes
AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool)
for np_type in [np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64]:
AstropyDumper.add_representer(np_type,
yaml.representer.SafeRepresenter.represent_int)
for np_type in [np.float_, np.float16, np.float32, np.float64,
np.longdouble]:
AstropyDumper.add_representer(np_type,
yaml.representer.SafeRepresenter.represent_float)
for np_type in [np.complex_, complex, np.complex64, np.complex128]:
AstropyDumper.add_representer(np_type, _complex_representer)
AstropyLoader.add_constructor('tag:yaml.org,2002:python/complex',
_complex_constructor)
AstropyLoader.add_constructor('tag:yaml.org,2002:python/tuple',
AstropyLoader._construct_python_tuple)
AstropyLoader.add_constructor('tag:yaml.org,2002:python/unicode',
AstropyLoader._construct_python_unicode)
AstropyLoader.add_constructor('!astropy.units.Unit', _unit_constructor)
AstropyLoader.add_constructor('!numpy.ndarray', _ndarray_constructor)
AstropyLoader.add_constructor('!numpy.void', _void_constructor)
AstropyLoader.add_constructor('!astropy.time.Time', _time_constructor)
AstropyLoader.add_constructor('!astropy.time.TimeDelta', _timedelta_constructor)
AstropyLoader.add_constructor('!astropy.coordinates.sky_coordinate.SkyCoord',
_skycoord_constructor)
AstropyLoader.add_constructor('!astropy.table.SerializedColumn',
_serialized_column_constructor)
for cls, tag in ((u.Quantity, '!astropy.units.Quantity'),
(u.Magnitude, '!astropy.units.Magnitude'),
(u.Dex, '!astropy.units.Dex'),
(u.Decibel, '!astropy.units.Decibel'),
(coords.Angle, '!astropy.coordinates.Angle'),
(coords.Latitude, '!astropy.coordinates.Latitude'),
(coords.Longitude, '!astropy.coordinates.Longitude'),
(coords.EarthLocation, '!astropy.coordinates.earth.EarthLocation')):
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
for cls in (list(coords.representation.REPRESENTATION_CLASSES.values())
+ list(coords.representation.DIFFERENTIAL_CLASSES.values())):
name = cls.__name__
# Add representations/differentials defined in astropy.
if name in coords.representation.__all__:
tag = '!astropy.coordinates.' + name
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader)
def load_all(stream):
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load_all(stream, Loader=AstropyLoader)
def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
"""
kwargs['Dumper'] = AstropyDumper
kwargs.setdefault('default_flow_style', None)
return yaml.dump(data, stream=stream, **kwargs)
|
55b43c2dfcc452ebe34a9584131fe68d9e5901599a2f552fe4682ab0429078f2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple input/output related functionality that is not
part of a larger framework or standard.
"""
import pickle
__all__ = ['fnpickle', 'fnunpickle']
def fnunpickle(fileorname, number=0):
""" Unpickle pickled objects from a specified file and return the contents.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : object or list
If ``number`` is 0, this is a individual object - the first one
unpickled from the file. Otherwise, it is a list of objects unpickled
from the file.
"""
if isinstance(fileorname, str):
f = open(fileorname, 'rb')
close = True
else:
f = fileorname
close = False
try:
if number > 0: # get that number
res = []
for i in range(number):
res.append(pickle.load(f))
elif number < 0: # get all objects
res = []
eof = False
while not eof:
try:
res.append(pickle.load(f))
except EOFError:
eof = True
else: # number==0
res = pickle.load(f)
finally:
if close:
f.close()
return res
def fnpickle(object, fileorname, protocol=None, append=False):
"""Pickle an object to a specified file.
Parameters
----------
object
The python object to pickle.
fileorname : str or file-like
The filename or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect).
"""
if protocol is None:
protocol = pickle.HIGHEST_PROTOCOL
if isinstance(fileorname, str):
f = open(fileorname, 'ab' if append else 'wb')
close = True
else:
f = fileorname
close = False
try:
pickle.dump(object, f, protocol=protocol)
finally:
if close:
f.close()
|
203525835af727fab74268884b58d3cb2e38dc6820ec8ee461dfb008b9ebeb61 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing Parquet
tables that are not meant to be used directly, but instead are
available as readers/writers in `astropy.table`. See
:ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from astropy.utils import minversion
PARQUET_SIGNATURE = b'PAR1'
__all__ = [] # nothing is publicly scoped
def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.parquet', '.parq'))
else:
return False
def read_table_parquet(input, include_names=None, exclude_names=None,
schema_only=False, filters=None):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
Examples:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, 'read'):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode('UTF-8'): v.decode('UTF-8') for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if 'table_meta_yaml' in md:
meta_yaml = md.pop('table_meta_yaml').split('\n')
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if 'meta' in meta_hdr:
meta_dict = meta_hdr['meta']
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if '__serialized_columns__' in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict['__serialized_columns__']
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(dict.fromkeys([x for x in full_table_columns.values() if x in use_names]))
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict['__serialized_columns__'].keys()):
if scol not in use_names:
meta_dict['__serialized_columns__'].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Now need to convert parquet table to Astropy
dtype = []
for name in names_to_read:
# Pyarrow string and byte columns do not have native length information
# so we must determine those here.
if schema.field(name).type not in (pa.string(), pa.binary()):
# Convert the pyarrow type into a numpy dtype (which is returned
# by the to_pandas_type() method).
dtype.append(schema.field(name).type.to_pandas_dtype())
continue
# Special-case for string and binary columns
md_name = f'table::len::{name}'
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
elif schema_only: # Find the maximum string length.
# Choose an arbitrary string length since
# are not reading in the table.
strlen = 10
warnings.warn(f"No {md_name} found in metadata. "
f"Guessing {{strlen}} for schema.",
AstropyUserWarning)
else:
strlen = max([len(row.as_py()) for row in pa_table[name]])
warnings.warn(f"No {md_name} found in metadata. "
f"Using longest string ({{strlen}} characters).",
AstropyUserWarning)
dtype.append(f'U{strlen}' if schema.field(name).type == pa.string() else f'|S{strlen}')
# Create the empty numpy record array to store the pyarrow data.
data = np.zeros(num_rows, dtype=list(zip(names_to_read, dtype)))
if not schema_only:
# Convert each column in the pyarrow table to a numpy array
for name in names_to_read:
data[name][:] = pa_table[name].to_numpy()
table = Table(data=data, meta=meta_dict)
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = dict((x['name'], x) for x in meta_hdr['datatype'])
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table
def write_table_parquet(table, output, overwrite=False):
"""
Write a Table object to a Parquet file
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f'`output` should be a string or path-like, not {output}')
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as('parquet'):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = '\n'.join(meta_yaml)
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
if col.dtype.type is np.str_:
metadata[f'table::len::{name}'] = str(col.dtype.itemsize//4)
elif col.dtype.type is np.bytes_:
metadata[f'table::len::{name}'] = str(col.dtype.itemsize)
metadata['table_meta_yaml'] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {k.encode('UTF-8'): v.encode('UTF-8') for k, v in metadata.items()}
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = [(name, pa.from_numpy_dtype(encode_table.dtype[name].type))
for name in encode_table.dtype.names]
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# We use version='2.0' for full support of datatypes including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = [pa.array(col) for col in encode_table.itercols()]
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table)
def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == 'name':
all_names.append(v)
return all_names
def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader('parquet', Table, read_table_parquet)
io_registry.register_writer('parquet', Table, write_table_parquet)
io_registry.register_identifier('parquet', Table, parquet_identify)
def get_pyarrow():
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
raise Exception("pyarrow is required to read and write parquet files")
if minversion(pa, '6.0.0'):
writer_version = '2.4'
else:
writer_version = '2.0'
return pa, parquet, writer_version
|
62e005a982b482d17a40bc54e31a748b7e6c040221eabe5bdb0daf71821ad3bf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Mixin columns for use in ascii/tests/test_ecsv.py, fits/tests/test_connect.py,
and misc/tests/test_hdf5.py
"""
from astropy import coordinates, table, time, units as u
el = coordinates.EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = coordinates.SphericalRepresentation(
[0, 1]*u.deg, [2, 3]*u.deg, 1*u.kpc)
cr = coordinates.CartesianRepresentation(
[0, 1]*u.pc, [4, 5]*u.pc, [8, 6]*u.pc)
sd = coordinates.SphericalCosLatDifferential(
[0, 1]*u.mas/u.yr, [0, 1]*u.mas/u.yr, 10*u.km/u.s)
srd = coordinates.SphericalRepresentation(
sr, differentials=sd)
sc = coordinates.SkyCoord([1, 2], [3, 4], unit='deg,deg',
frame='fk4', obstime='J1990.5')
scd = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5'] * 2)
scdc = scd.copy()
scdc.representation_type = 'cartesian'
scpm = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr)
scpmrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr,
radial_velocity=[11, 12]*u.km/u.s)
scrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
radial_velocity=[11, 12]*u.km/u.s)
tm = time.Time([51000.5, 51001.5], format='mjd', scale='tai',
precision=5, location=el[0])
tm2 = time.Time(tm, precision=3, format='iso')
tm3 = time.Time(tm, location=el)
tm3.info.serialize_method['ecsv'] = 'jd1_jd2'
obj = table.Column([{'a': 1}, {'b': [2]}], dtype='object')
# NOTE: for testing, the name of the column "x" for the
# Quantity is important since it tests the fix for #10215
# (namespace clash, where "x" clashes with "el.x").
mixin_cols = {
'tm': tm,
'tm2': tm2,
'tm3': tm3,
'dt': time.TimeDelta([1, 2] * u.day),
'sc': sc,
'scd': scd,
'scdc': scdc,
'scpm': scpm,
'scpmrv': scpmrv,
'scrv': scrv,
'x': [1, 2] * u.m,
'qdb': [10, 20] * u.dB(u.mW),
'qdex': [4.5, 5.5] * u.dex(u.cm / u.s**2),
'qmag': [21, 22] * u.ABmag,
'lat': coordinates.Latitude([1, 2] * u.deg),
'lon': coordinates.Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': coordinates.Angle([1, 2] * u.deg),
'el': el,
'sr': sr,
'cr': cr,
'sd': sd,
'srd': srd,
'nd': table.NdarrayMixin([1, 2]),
'obj': obj,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location']
compare_attrs = {
'tm': time_attrs,
'tm2': time_attrs,
'tm3': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation_type', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],
'scdc': ['x', 'y', 'z', 'representation_type', 'frame.name'],
'scpm': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'representation_type', 'frame.name'],
'scpmrv': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'radial_velocity', 'representation_type', 'frame.name'],
'scrv': ['ra', 'dec', 'distance', 'radial_velocity',
'representation_type', 'frame.name'],
'x': ['value', 'unit'],
'qdb': ['value', 'unit'],
'qdex': ['value', 'unit'],
'qmag': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['data'],
'sr': ['lon', 'lat', 'distance'],
'cr': ['x', 'y', 'z'],
'sd': ['d_lon_coslat', 'd_lat', 'd_distance'],
'srd': ['lon', 'lat', 'distance', 'differentials.s.d_lon_coslat',
'differentials.s.d_lat', 'differentials.s.d_distance'],
'obj': [],
'su': ['i', 'f'],
'tab': ['tm', 'c', 'x'],
'qtab': ['tm', 'c', 'x'],
}
non_trivial_names = {
'cr': ['cr.x', 'cr.y', 'cr.z'],
'dt': ['dt.jd1', 'dt.jd2'],
'el': ['el.x', 'el.y', 'el.z'],
'sc': ['sc.ra', 'sc.dec'],
'scd': ['scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2'],
'scdc': ['scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime.jd1', 'scdc.obstime.jd2'],
'scfc': ['scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime.jd1', 'scdc.obstime.jd2'],
'scpm': ['scpm.ra', 'scpm.dec', 'scpm.distance',
'scpm.pm_ra_cosdec', 'scpm.pm_dec'],
'scpmrv': ['scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',
'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',
'scpmrv.radial_velocity'],
'scrv': ['scrv.ra', 'scrv.dec', 'scrv.distance',
'scrv.radial_velocity'],
'sd': ['sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance'],
'sr': ['sr.lon', 'sr.lat', 'sr.distance'],
'srd': ['srd.lon', 'srd.lat', 'srd.distance',
'srd.differentials.s.d_lon_coslat',
'srd.differentials.s.d_lat',
'srd.differentials.s.d_distance'],
'tm': ['tm.jd1', 'tm.jd2'],
'tm2': ['tm2.jd1', 'tm2.jd2'],
'tm3': ['tm3.jd1', 'tm3.jd2',
'tm3.location.x', 'tm3.location.y', 'tm3.location.z'],
}
serialized_names = {name: non_trivial_names.get(name, [name])
for name in sorted(mixin_cols)}
|
550b062f4573e4d612268f6e576262b9f6955f4427a7be8e178c55087b3ec105 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
class CatchZeroByteWriter(io.BufferedWriter):
"""File handle to intercept 0-byte writes"""
def write(self, buffer):
nbytes = super().write(buffer)
if nbytes == 0:
raise ValueError("This writer does not allow empty writes")
return nbytes
|
29e73944e3febcf5c810278b88f377b650c53540cad7856ef26917dbe2cff58c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import OrderedDict
import numpy as np
from .base import IORegistryError, _UnifiedIORegistryBase
__all__ = ['UnifiedIORegistry', 'UnifiedInputRegistry', 'UnifiedOutputRegistry']
PATH_TYPES = (str, os.PathLike) # TODO! include bytes
# -----------------------------------------------------------------------------
class UnifiedInputRegistry(_UnifiedIORegistryBase):
"""Read-only Unified Registry.
.. versionadded:: 5.0
Examples
--------
First let's start by creating a read-only registry.
.. code-block:: python
>>> from astropy.io.registry import UnifiedInputRegistry
>>> read_reg = UnifiedInputRegistry()
There is nothing in this registry. Let's make a reader for the
:class:`~astropy.table.Table` class::
from astropy.table import Table
def my_table_reader(filename, some_option=1):
# Read in the table by any means necessary
return table # should be an instance of Table
Such a function can then be registered with the I/O registry::
read_reg.register_reader('my-table-format', Table, my_table_reader)
Note that we CANNOT then read in a table with::
d = Table.read('my_table_file.mtf', format='my-table-format')
Why? because ``Table.read`` uses Astropy's default global registry and this
is a separate registry.
Instead we can read by the read method on the registry::
d = read_reg.read(Table, 'my_table_file.mtf', format='my-table-format')
"""
def __init__(self):
super().__init__() # set _identifiers
self._readers = OrderedDict()
self._registries["read"] = dict(attr="_readers", column="Read")
self._registries_order = ("read", "identify")
# =========================================================================
# Read methods
def register_reader(self, data_format, data_class, function, force=False,
priority=0):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : class
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the reader, used to compare possible formats when
trying to determine the best reader to use. Higher priorities are
preferred over lower priorities, with the default priority being 0
(negative numbers are allowed though).
"""
if not (data_format, data_class) in self._readers or force:
self._readers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Reader for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def unregister_reader(self, data_format, data_class):
"""
Unregister a reader function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that the reader produces.
"""
if (data_format, data_class) in self._readers:
self._readers.pop((data_format, data_class))
else:
raise IORegistryError("No reader defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def get_reader(self, data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in self._readers if fmt == data_format]
for reader_format, reader_class in readers:
if self._is_best_match(data_class, reader_class, readers):
return self._readers[(reader_format, reader_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Read')
raise IORegistryError(
"No reader defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def read(self, cls, *args, format=None, cache=False, **kwargs):
"""
Read in data.
Parameters
----------
cls : class
*args
The arguments passed to this method depend on the format.
format : str or None
cache : bool
Whether to cache the results of reading in the data.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered reader.
"""
ctx = None
try:
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary', cache=cache)
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'read', cls, path, fileobj, args, kwargs)
reader = self.get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError('could not convert reader output to {} '
'class.'.format(cls.__name__))
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
# -----------------------------------------------------------------------------
class UnifiedOutputRegistry(_UnifiedIORegistryBase):
"""Write-only Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._writers = OrderedDict()
self._registries["write"] = dict(attr="_writers", column="Write")
self._registries_order = ("write", "identify", )
# =========================================================================
# Write Methods
def register_writer(self, data_format, data_class, function, force=False, priority=0):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : class
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the writer, used to compare possible formats when trying
to determine the best writer to use. Higher priorities are preferred
over lower priorities, with the default priority being 0 (negative
numbers are allowed though).
"""
if not (data_format, data_class) in self._writers or force:
self._writers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Writer for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def unregister_writer(self, data_format, data_class):
"""
Unregister a writer function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be written.
"""
if (data_format, data_class) in self._writers:
self._writers.pop((data_format, data_class))
else:
raise IORegistryError("No writer defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def get_writer(self, data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in self._writers if fmt == data_format]
for writer_format, writer_class in writers:
if self._is_best_match(data_class, writer_class, writers):
return self._writers[(writer_format, writer_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Write')
raise IORegistryError(
"No writer defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def write(self, data, *args, format=None, **kwargs):
"""
Write out data.
Parameters
----------
data : object
The data to write.
*args
The arguments passed to this method depend on the format.
format : str or None
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered writer. Most often `None`.
.. versionadded:: 4.3
"""
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'write', data.__class__, path, fileobj, args, kwargs)
writer = self.get_writer(format, data.__class__)
return writer(data, *args, **kwargs)
# -----------------------------------------------------------------------------
class UnifiedIORegistry(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
|
6e3a64cbace0325707e47904b4987c5baf5a5a267f4d3e35308634eb88962d01 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Unified I/O Registry.
"""
from . import base, compat, core, interface
from .base import *
from .compat import *
from .compat import _identifiers, _readers, _writers # for backwards compat
from .core import *
from .interface import *
__all__ = core.__all__ + interface.__all__ + compat.__all__ + base.__all__
|
3364f4b2e36baa83fa21d685a9c4b90a14c08c3365378a95afba1691c944e0dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import contextlib
import re
import warnings
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ['IORegistryError']
class IORegistryError(Exception):
"""Custom error for registry clashes.
"""
pass
# -----------------------------------------------------------------------------
class _UnifiedIORegistryBase(metaclass=abc.ABCMeta):
"""Base class for registries in Astropy's Unified IO.
This base class provides identification functions and miscellaneous
utilities. For an example how to build a registry subclass we suggest
:class:`~astropy.io.registry.UnifiedInputRegistry`, which enables
read-only registries. These higher-level subclasses will probably serve
better as a baseclass, for instance
:class:`~astropy.io.registry.UnifiedIORegistry` subclasses both
:class:`~astropy.io.registry.UnifiedInputRegistry` and
:class:`~astropy.io.registry.UnifiedOutputRegistry` to enable both
reading from and writing to files.
.. versionadded:: 5.0
"""
def __init__(self):
# registry of identifier functions
self._identifiers = OrderedDict()
# what this class can do: e.g. 'read' &/or 'write'
self._registries = dict()
self._registries["identify"] = dict(attr="_identifiers", column="Auto-identify")
self._registries_order = ("identify", ) # match keys in `_registries`
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarly delayed are added
# to this set.
self._delayed_docs_classes = set()
@property
def available_registries(self):
"""Available registries.
Returns
-------
``dict_keys``
"""
return self._registries.keys()
def get_formats(self, data_class=None, filter_on=None):
"""
Get the list of registered formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class or None, optional
Filter readers/writer to match data class (default = all classes).
filter_on : str or None, optional
Which registry to show. E.g. "identify"
If None search for both. Default is None.
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
Raises
------
ValueError
If ``filter_on`` is not None nor a registry name.
"""
from astropy.table import Table
# set up the column names
colnames = (
"Data class", "Format",
*[self._registries[k]["column"] for k in self._registries_order],
"Deprecated")
i_dataclass = colnames.index("Data class")
i_format = colnames.index("Format")
i_regstart = colnames.index(self._registries[self._registries_order[0]]["column"])
i_deprecated = colnames.index("Deprecated")
# registries
regs = set()
for k in self._registries.keys() - {"identify"}:
regs |= set(getattr(self, self._registries[k]["attr"]))
format_classes = sorted(regs, key=itemgetter(0))
# the format classes from all registries except "identify"
rows = []
for (fmt, cls) in format_classes:
# see if can skip, else need to document in row
if (data_class is not None and not self._is_best_match(
data_class, cls, format_classes)):
continue
# flags for each registry
has_ = {k: "Yes" if (fmt, cls) in getattr(self, v["attr"]) else "No"
for k, v in self._registries.items()}
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ('ascii.' + fmt, cls)
# deprecation flag
deprecated = "Yes" if ascii_format_class in format_classes else ""
# add to rows
rows.append((cls.__name__, fmt,
*[has_[n] for n in self._registries_order], deprecated))
# filter_on can be in self_registries_order or None
if str(filter_on).lower() in self._registries_order:
index = self._registries_order.index(str(filter_on).lower())
rows = [row for row in rows if row[i_regstart + index] == 'Yes']
elif filter_on is not None:
raise ValueError('unrecognized value for "filter_on": {0}.\n'
f'Allowed are {self._registries_order} and None.')
# Sorting the list of tuples is much faster than sorting it after the
# table is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(zip(*sorted(
rows, key=itemgetter(i_dataclass, i_deprecated, i_format))))
else:
data = None
# make table
# need to filter elementwise comparison failure issue
# https://github.com/numpy/numpy/issues/6784
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
format_table = Table(data, names=colnames)
if not np.any(format_table['Deprecated'].data == 'Yes'):
format_table.remove_column('Deprecated')
return format_table
@contextlib.contextmanager
def delay_doc_updates(self, cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
Registering multiple readers and writers can cause significant overhead
because the documentation of the corresponding ``read`` and ``write``
methods are build every time.
Examples
--------
see for example the source code of ``astropy.table.__init__``.
"""
self._delayed_docs_classes.add(cls)
yield
self._delayed_docs_classes.discard(cls)
for method in self._registries.keys() - {"identify"}:
self._update__doc__(cls, method)
# =========================================================================
# Identifier methods
def register_identifier(self, data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example
.. code-block:: python
from astropy.io.registry import register_identifier
from astropy.table import Table
def my_identifier(*args, **kwargs):
return isinstance(args[0], str) and args[0].endswith('.tbl')
register_identifier('ipac', Table, my_identifier)
unregister_identifier('ipac', Table)
"""
if not (data_format, data_class) in self._identifiers or force:
self._identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError("Identifier for format '{}' and class '{}' is "
'already defined'.format(data_format,
data_class.__name__))
def unregister_identifier(self, data_format, data_class):
"""
Unregister an identifier function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be read/written.
"""
if (data_format, data_class) in self._identifiers:
self._identifiers.pop((data_format, data_class))
else:
raise IORegistryError("No identifier defined for format '{}' and class"
" '{}'".format(data_format, data_class.__name__))
def identify_format(self, origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
A string ``"read`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str or path-like or None
The path to the file or None.
fileobj : file-like or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
Positional arguments for the `read` or `write` function. Note that
these must be provided as sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in self._identifiers:
if self._is_best_match(data_class_required, data_class, self._identifiers):
if self._identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs):
valid_formats.append(data_format)
return valid_formats
# =========================================================================
# Utils
def _get_format_table_str(self, data_class, filter_on):
"""``get_formats()``, without column "Data class", as a str."""
format_table = self.get_formats(data_class, filter_on)
format_table.remove_column('Data class')
format_table_str = '\n'.join(format_table.pformat(max_lines=-1))
return format_table_str
def _is_best_match(self, class1, class2, format_classes):
"""
Determine if class2 is the "best" match for class1 in the list
of classes. It is assumed that (class2 in classes) is True.
class2 is the the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
if parent in classes: # class2 was superceded
return False
return False
def _get_valid_format(self, mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = self.identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = self._get_format_table_str(cls, mode.capitalize())
raise IORegistryError("Format could not be identified based on the"
" file name or contents, please provide a"
" 'format' argument.\n"
"The available formats are:\n"
"{}".format(format_table_str))
elif len(valid_formats) > 1:
return self._get_highest_priority_format(mode, cls, valid_formats)
return valid_formats[0]
def _get_highest_priority_format(self, mode, cls, valid_formats):
"""
Returns the reader or writer with the highest priority. If it is a tie,
error.
"""
if mode == "read":
format_dict = self._readers
mode_loader = "reader"
elif mode == "write":
format_dict = self._writers
mode_loader = "writer"
best_formats = []
current_priority = - np.inf
for format in valid_formats:
try:
_, priority = format_dict[(format, cls)]
except KeyError:
# We could throw an exception here, but get_reader/get_writer handle
# this case better, instead maximally deprioritise the format.
priority = - np.inf
if priority == current_priority:
best_formats.append(format)
elif priority > current_priority:
best_formats = [format]
current_priority = priority
if len(best_formats) > 1:
raise IORegistryError("Format is ambiguous - options are: {}".format(
', '.join(sorted(valid_formats, key=itemgetter(0)))
))
return best_formats[0]
def _update__doc__(self, data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for
the ``data_class.read``/``data_class.write`` functions (respectively).
Don't update if the data_class does not have the relevant method.
"""
# abort if method "readwrite" isn't on data_class
if not hasattr(data_class, readwrite):
return
from .interface import UnifiedReadWrite
FORMATS_TEXT = 'The available built-in formats are:'
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r'(\S)', line) for line in lines[1:]]
left_indent = ' ' * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = self.get_formats(data_class, readwrite.capitalize())
format_table.remove_column('Data class')
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub('-', '=', new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if 'Deprecated' in format_table.colnames:
new_lines.extend(['',
'Deprecated format names like ``aastex`` will be '
'removed in a future version. Use the full ',
'name (e.g. ``ascii.aastex``) instead.'])
new_lines = [FORMATS_TEXT, ''] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
if isinstance(class_readwrite_func, UnifiedReadWrite):
class_readwrite_func.__class__.__doc__ = '\n'.join(lines)
else:
try:
class_readwrite_func.__doc__ = '\n'.join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = '\n'.join(lines)
|
bfe0e77aef5444aa2f855ebcca6c952422c9414ea9d7c6a1e240867222198106 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import re
from .base import IORegistryError
__all__ = ['UnifiedReadWriteMethod', 'UnifiedReadWrite']
# -----------------------------------------------------------------------------
class UnifiedReadWrite:
"""Base class for the worker object used in unified read() or write() methods.
This lightweight object is created for each `read()` or `write()` call
via ``read`` / ``write`` descriptors on the data object class. The key
driver is to allow complete format-specific documentation of available
method options via a ``help()`` method, e.g. ``Table.read.help('fits')``.
Subclasses must define a ``__call__`` method which is what actually gets
called when the data object ``read()`` or ``write()`` method is called.
For the canonical example see the `~astropy.table.Table` class
implementation (in particular the ``connect.py`` module there).
Parameters
----------
instance : object
Descriptor calling instance or None if no instance
cls : type
Descriptor calling class (either owner class or instance class)
method_name : str
Method name, e.g. 'read' or 'write'
registry : ``_UnifiedIORegistryBase`` or None, optional
The IO registry.
"""
def __init__(self, instance, cls, method_name, registry=None):
if registry is None:
from astropy.io.registry.compat import default_registry as registry
self._registry = registry
self._instance = instance
self._cls = cls
self._method_name = method_name # 'read' or 'write'
@property
def registry(self):
"""Unified I/O registry instance."""
return self._registry
def help(self, format=None, out=None):
"""Output help documentation for the specified unified I/O ``format``.
By default the help output is printed to the console via ``pydoc.pager``.
Instead one can supplied a file handle object as ``out`` and the output
will be written to that handle.
Parameters
----------
format : str
Unified I/O format name, e.g. 'fits' or 'ascii.ecsv'
out : None or path-like
Output destination (default is stdout via a pager)
"""
cls = self._cls
method_name = self._method_name
# Get reader or writer function associated with the registry
get_func = (self._registry.get_reader if method_name == 'read'
else self._registry.get_writer)
try:
if format:
read_write_func = get_func(format, cls)
except IORegistryError as err:
reader_doc = 'ERROR: ' + str(err)
else:
if format:
# Format-specific
header = ("{}.{}(format='{}') documentation\n"
.format(cls.__name__, method_name, format))
doc = read_write_func.__doc__
else:
# General docs
header = f'{cls.__name__}.{method_name} general documentation\n'
doc = getattr(cls, method_name).__doc__
reader_doc = re.sub('.', '=', header)
reader_doc += header
reader_doc += re.sub('.', '=', header)
reader_doc += os.linesep
if doc is not None:
reader_doc += inspect.cleandoc(doc)
if out is None:
import pydoc
pydoc.pager(reader_doc)
else:
out.write(reader_doc)
def list_formats(self, out=None):
"""Print a list of available formats to console (or ``out`` filehandle)
out : None or file handle object
Output destination (default is stdout via a pager)
"""
tbl = self._registry.get_formats(self._cls, self._method_name.capitalize())
del tbl['Data class']
if out is None:
tbl.pprint(max_lines=-1, max_width=-1)
else:
out.write('\n'.join(tbl.pformat(max_lines=-1, max_width=-1)))
return out
# -----------------------------------------------------------------------------
class UnifiedReadWriteMethod(property):
"""Descriptor class for creating read() and write() methods in unified I/O.
The canonical example is in the ``Table`` class, where the ``connect.py``
module creates subclasses of the ``UnifiedReadWrite`` class. These have
custom ``__call__`` methods that do the setup work related to calling the
registry read() or write() functions. With this, the ``Table`` class
defines read and write methods as follows::
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
Parameters
----------
func : `~astropy.io.registry.UnifiedReadWrite` subclass
Class that defines read or write functionality
"""
# We subclass property to ensure that __set__ is defined and that,
# therefore, we are a data descriptor, which cannot be overridden.
# This also means we automatically inherit the __doc__ of fget (which will
# be a UnifiedReadWrite subclass), and that this docstring gets recognized
# and properly typeset by sphinx (which was previously an issue; see
# gh-11554).
# We override __get__ to pass both instance and class to UnifiedReadWrite.
def __get__(self, instance, owner_cls):
return self.fget(instance, owner_cls)
|
9c06cf046fa0118ec08b4637e97e5d9a4bc6e920967128457fee165ca99989ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import sys
from .core import UnifiedIORegistry
__all__ = ["register_reader", "register_writer", "register_identifier", # noqa: F822
"unregister_reader", "unregister_writer", "unregister_identifier",
"get_reader", "get_writer", "get_formats",
"read", "write",
"identify_format", "delay_doc_updates"]
# make a default global-state registry (not publicly scoped, but often accessed)
# this is for backward compatibility when ``io.registry`` was a file.
default_registry = UnifiedIORegistry()
# also need to expose the enclosed registries
_identifiers = default_registry._identifiers
_readers = default_registry._readers
_writers = default_registry._writers
def _make_io_func(method_name):
"""Makes a function for a method on UnifiedIORegistry.
.. todo::
Make kwarg "registry" not hidden.
Returns
-------
wrapper : callable
Signature matches method on UnifiedIORegistry.
Accepts (hidden) kwarg "registry". default is ``default_registry``.
"""
@functools.wraps(getattr(default_registry, method_name))
def wrapper(*args, registry=None, **kwargs):
# written this way in case ever controlled by ScienceState
if registry is None:
registry = default_registry
# get and call bound method from registry instance
return getattr(registry, method_name)(*args, **kwargs)
return wrapper
# =============================================================================
# JIT function creation and lookup (PEP 562)
def __dir__():
dir_out = list(globals())
return sorted(dir_out + __all__)
def __getattr__(method: str):
if method in __all__:
return _make_io_func(method)
raise AttributeError(f"module {__name__!r} has no attribute {method!r}")
|
6444d437fcd5fd3fde9dcc57e138d495586591eeb8f4dace9d0a26554e25ccd8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module handles the conversion of various VOTABLE datatypes
to/from TABLEDATA_ and BINARY_ formats.
"""
# STDLIB
import re
import sys
from struct import unpack as _struct_unpack
from struct import pack as _struct_pack
# THIRD-PARTY
import numpy as np
from numpy import ma
# ASTROPY
from astropy.utils.xml.writer import xml_escape_cdata
# LOCAL
from .exceptions import (vo_raise, vo_warn, warn_or_raise, W01,
W30, W31, W39, W46, W47, W49, W51, W55, E01, E02, E03, E04,
E05, E06, E24)
__all__ = ['get_converter', 'Converter', 'table_column_to_votable_datatype']
pedantic_array_splitter = re.compile(r" +")
array_splitter = re.compile(r"\s+|(?:\s*,\s*)")
"""
A regex to handle splitting values on either whitespace or commas.
SPEC: Usage of commas is not actually allowed by the spec, but many
files in the wild use them.
"""
_zero_int = b'\0\0\0\0'
_empty_bytes = b''
_zero_byte = b'\0'
struct_unpack = _struct_unpack
struct_pack = _struct_pack
if sys.byteorder == 'little':
def _ensure_bigendian(x):
if x.dtype.byteorder != '>':
return x.byteswap()
return x
else:
def _ensure_bigendian(x):
if x.dtype.byteorder == '<':
return x.byteswap()
return x
def _make_masked_array(data, mask):
"""
Masked arrays of zero length that also have a mask of zero length
cause problems in Numpy (at least in 1.6.2). This function
creates a masked array from data and a mask, unless it is zero
length.
"""
# np.ma doesn't like setting mask to []
if len(data):
return ma.array(
np.array(data),
mask=np.array(mask, dtype='bool'))
else:
return ma.array(np.array(data))
def bitarray_to_bool(data, length):
"""
Converts a bit array (a string of bits in a bytes object) to a
boolean Numpy array.
Parameters
----------
data : bytes
The bit array. The most significant byte is read first.
length : int
The number of bits to read. The least significant bits in the
data bytes beyond length will be ignored.
Returns
-------
array : numpy bool array
"""
results = []
for byte in data:
for bit_no in range(7, -1, -1):
bit = byte & (1 << bit_no)
bit = (bit != 0)
results.append(bit)
if len(results) == length:
break
if len(results) == length:
break
return np.array(results, dtype='b1')
def bool_to_bitarray(value):
"""
Converts a numpy boolean array to a bit array (a string of bits in
a bytes object).
Parameters
----------
value : numpy bool array
Returns
-------
bit_array : bytes
The first value in the input array will be the most
significant bit in the result. The length will be `floor((N +
7) / 8)` where `N` is the length of `value`.
"""
value = value.flat
bit_no = 7
byte = 0
bytes = []
for v in value:
if v:
byte |= 1 << bit_no
if bit_no == 0:
bytes.append(byte)
bit_no = 7
byte = 0
else:
bit_no -= 1
if bit_no != 7:
bytes.append(byte)
return struct_pack(f"{len(bytes)}B", *bytes)
class Converter:
"""
The base class for all converters. Each subclass handles
converting a specific VOTABLE data type to/from the TABLEDATA_ and
BINARY_ on-disk representations.
Parameters
----------
field : `~astropy.io.votable.tree.Field`
object describing the datatype
config : dict
The parser configuration dictionary
pos : tuple
The position in the XML file where the FIELD object was
found. Used for error messages.
"""
def __init__(self, field, config=None, pos=None):
pass
@staticmethod
def _parse_length(read):
return struct_unpack(">I", read(4))[0]
@staticmethod
def _write_length(length):
return struct_pack(">I", int(length))
def supports_empty_values(self, config):
"""
Returns True when the field can be completely empty.
"""
return config.get('version_1_3_or_later')
def parse(self, value, config=None, pos=None):
"""
Convert the string *value* from the TABLEDATA_ format into an
object with the correct native in-memory datatype and mask flag.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : tuple
A two-element tuple of: value, mask.
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
raise NotImplementedError(
"This datatype must implement a 'parse' method.")
def parse_scalar(self, value, config=None, pos=None):
"""
Parse a single scalar of the underlying type of the converter.
For non-array converters, this is equivalent to parse. For
array converters, this is used to parse a single
element of the array.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : (2,) tuple
(value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
return self.parse(value, config, pos)
def output(self, value, mask):
"""
Convert the object *value* (in the native in-memory datatype)
to a unicode string suitable for serializing in the TABLEDATA_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
tabledata_repr : unicode
"""
raise NotImplementedError(
"This datatype must implement a 'output' method.")
def binparse(self, read):
"""
Reads some number of bytes from the BINARY_ format
representation by calling the function *read*, and returns the
native in-memory object representation for the datatype
handled by *self*.
Parameters
----------
read : function
A function that given a number of bytes, returns a byte
string.
Returns
-------
native : (2,) tuple
(value, mask). The value as a Numpy array or scalar, and *mask* is
True if the value is missing.
"""
raise NotImplementedError(
"This datatype must implement a 'binparse' method.")
def binoutput(self, value, mask):
"""
Convert the object *value* in the native in-memory datatype to
a string of bytes suitable for serialization in the BINARY_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
bytes : bytes
The binary representation of the value, suitable for
serialization in the BINARY_ format.
"""
raise NotImplementedError(
"This datatype must implement a 'binoutput' method.")
class Char(Converter):
"""
Handles the char datatype. (7-bit unsigned characters)
Missing values are not handled for string or unicode types.
"""
default = _empty_bytes
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
self.field_name = field.name
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = '1'
if field.arraysize == '*':
self.format = 'O'
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = '*'
else:
if field.arraysize.endswith('*'):
field.arraysize = field.arraysize[:-1]
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, 'char', field.ID), config)
self.format = f'U{self.arraysize:d}'
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = f">{self.arraysize:d}s"
def supports_empty_values(self, config):
return True
def parse(self, value, config=None, pos=None):
if self.arraysize != '*' and len(value) > self.arraysize:
vo_warn(W46, ('char', self.arraysize), config, pos)
# Warn about non-ascii characters if warnings are enabled.
try:
value.encode('ascii')
except UnicodeEncodeError:
vo_warn(W55, (self.field_name, value), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ''
# The output methods for Char assume that value is either str or bytes.
# This method needs to return a str, but needs to warn if the str contains
# non-ASCII characters.
try:
if isinstance(value, str):
value.encode('ascii')
else:
# Check for non-ASCII chars in the bytes object.
value = value.decode('ascii')
except (ValueError, UnicodeEncodeError):
warn_or_raise(E24, UnicodeEncodeError, (value, self.field_name))
finally:
if isinstance(value, bytes):
# Convert the bytes to str regardless of non-ASCII chars.
value = value.decode('utf-8')
return xml_escape_cdata(value)
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length).decode('ascii'), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize))[0]
end = s.find(_zero_byte)
s = s.decode('ascii')
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == '':
return _zero_int
if isinstance(value, str):
try:
value = value.encode('ascii')
except ValueError:
vo_raise(E24, (value, self.field_name))
return self._write_length(len(value)) + value
def _binoutput_fixed(self, value, mask):
if mask:
value = _empty_bytes
elif isinstance(value, str):
try:
value = value.encode('ascii')
except ValueError:
vo_raise(E24, (value, self.field_name))
return struct_pack(self._struct_format, value)
class UnicodeChar(Converter):
"""
Handles the unicodeChar data type. UTF-16-BE.
Missing values are not handled for string or unicode types.
"""
default = ''
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = '1'
if field.arraysize == '*':
self.format = 'O'
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = '*'
else:
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, 'unicode', field.ID), config)
self.format = f'U{self.arraysize:d}'
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = f">{self.arraysize*2:d}s"
def parse(self, value, config=None, pos=None):
if self.arraysize != '*' and len(value) > self.arraysize:
vo_warn(W46, ('unicodeChar', self.arraysize), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ''
return xml_escape_cdata(str(value))
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length * 2).decode('utf_16_be'), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize * 2))[0]
s = s.decode('utf_16_be')
end = s.find('\0')
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == '':
return _zero_int
encoded = value.encode('utf_16_be')
return self._write_length(len(encoded) / 2) + encoded
def _binoutput_fixed(self, value, mask):
if mask:
value = ''
return struct_pack(self._struct_format, value.encode('utf_16_be'))
class Array(Converter):
"""
    Handles both fixed- and variable-length arrays.
"""
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
if config.get('verify', 'ignore') == 'exception':
self._splitter = self._splitter_pedantic
else:
self._splitter = self._splitter_lax
def parse_scalar(self, value, config=None, pos=0):
return self._base.parse_scalar(value, config, pos)
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return pedantic_array_splitter.split(value)
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if ',' in value:
vo_warn(W01, (), config, pos)
return array_splitter.split(value)
class VarArray(Array):
"""
    Handles variable-length arrays (i.e. where *arraysize* is '*').
"""
format = 'O'
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config)
self._base = base
self.default = np.array([], dtype=self._base.format)
def output(self, value, mask):
output = self._base.output
result = [output(x, m) for x, m in np.broadcast(value, mask)]
return ' '.join(result)
def binparse(self, read):
length = self._parse_length(read)
result = []
result_mask = []
binparse = self._base.binparse
for i in range(length):
val, mask = binparse(read)
result.append(val)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
def binoutput(self, value, mask):
if value is None or len(value) == 0:
return _zero_int
length = len(value)
result = [self._write_length(length)]
binoutput = self._base.binoutput
for x, m in zip(value, value.mask):
result.append(binoutput(x, m))
return _empty_bytes.join(result)
class ArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays, i.e. where *arraysize*
ends in '*'.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), False
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i:i+items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ScalarVarArray(VarArray):
"""
Handles a variable-length array of numeric scalars.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), False
parts = self._splitter(value, config, pos)
parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = parse(x, config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class NumericArray(Array):
"""
Handles a fixed-length array of numeric scalars.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config, pos)
self._base = base
self._arraysize = arraysize
self.format = f"{tuple(arraysize)}{base.format}"
self._items = 1
for dim in arraysize:
self._items *= dim
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = '>' + self.format
self.default = np.empty(arraysize, dtype=self._base.format)
self.default[...] = self._base.default
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
elif config['version_1_3_or_later'] and value == '':
return np.zeros(self._arraysize, dtype=self._base.format), True
parts = self._splitter(value, config, pos)
if len(parts) != self._items:
warn_or_raise(E02, E02, (self._items, len(parts)), config, pos)
if config.get('verify', 'ignore') == 'exception':
return self.parse_parts(parts, config, pos)
else:
if len(parts) == self._items:
pass
elif len(parts) > self._items:
parts = parts[:self._items]
else:
parts = (parts +
([self._base.default] * (self._items - len(parts))))
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
base_parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = base_parse(x, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype=self._base.format).reshape(
self._arraysize)
result_mask = np.array(result_mask, dtype='bool').reshape(
self._arraysize)
return result, result_mask
def output(self, value, mask):
base_output = self._base.output
value = np.asarray(value)
mask = np.asarray(mask)
if mask.size <= 1:
func = np.broadcast
        else:  # When mask is already an array but value is scalar, zip avoids a failed broadcast
func = zip
return ' '.join(base_output(x, m) for x, m in
func(value.flat, mask.flat))
def binparse(self, read):
result = np.frombuffer(read(self._memsize),
dtype=self._bigendian_format)[0]
result_mask = self._base.is_null(result)
return result, result_mask
def binoutput(self, value, mask):
filtered = self._base.filter_array(value, mask)
filtered = _ensure_bigendian(filtered)
return filtered.tobytes()
class Numeric(Converter):
"""
The base class for all numeric data types.
"""
array_type = NumericArray
vararray_type = ScalarVarArray
null = None
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = '>' + self.format
if field.values.null is not None:
self.null = np.asarray(field.values.null, dtype=self.format)
self.default = self.null
self.is_null = self._is_null
else:
self.is_null = np.isnan
def binparse(self, read):
result = np.frombuffer(read(self._memsize),
dtype=self._bigendian_format)
return result[0], self.is_null(result[0])
def _is_null(self, value):
return value == self.null
class FloatingPoint(Numeric):
"""
The base class for floating-point datatypes.
"""
default = np.nan
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Numeric.__init__(self, field, config, pos)
precision = field.precision
width = field.width
if precision is None:
format_parts = ['{!r:>']
else:
format_parts = ['{:']
if width is not None:
format_parts.append(str(width))
if precision is not None:
if precision.startswith("E"):
format_parts.append(f'.{int(precision[1:]):d}g')
elif precision.startswith("F"):
format_parts.append(f'.{int(precision[1:]):d}f')
else:
format_parts.append(f'.{int(precision):d}f')
format_parts.append('}')
self._output_format = ''.join(format_parts)
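        # Illustrative examples of the resulting format string (not
        # exhaustive): precision="F5" with width=10 gives '{:10.5f}',
        # precision="E4" with no width gives '{:.4g}', and no precision
        # at all falls back to the repr-based '{!r:>...}' form.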
self.nan = np.array(np.nan, self.format)
if self.null is None:
self._null_output = 'NaN'
self._null_binoutput = self.binoutput(self.nan, False)
self.filter_array = self._filter_nan
else:
self._null_output = self.output(np.asarray(self.null), False)
self._null_binoutput = self.binoutput(np.asarray(self.null), False)
self.filter_array = self._filter_null
if config.get('verify', 'ignore') == 'exception':
self.parse = self._parse_pedantic
else:
self.parse = self._parse_permissive
def supports_empty_values(self, config):
return True
def _parse_pedantic(self, value, config=None, pos=None):
if value.strip() == '':
return self.null, True
f = float(value)
return f, self.is_null(f)
def _parse_permissive(self, value, config=None, pos=None):
try:
f = float(value)
return f, self.is_null(f)
except ValueError:
# IRSA VOTables use the word 'null' to specify empty values,
# but this is not defined in the VOTable spec.
if value.strip() != '':
vo_warn(W30, value, config, pos)
return self.null, True
@property
def output_format(self):
return self._output_format
def output(self, value, mask):
if mask:
return self._null_output
if np.isfinite(value):
if not np.isscalar(value):
value = value.dtype.type(value)
result = self._output_format.format(value)
if result.startswith('array'):
raise RuntimeError()
if (self._output_format[2] == 'r' and
result.endswith('.0')):
result = result[:-2]
return result
elif np.isnan(value):
return 'NaN'
elif np.isposinf(value):
return '+InF'
elif np.isneginf(value):
return '-InF'
        # Should never be reached; every finite, NaN or infinite value is
        # handled above.
vo_raise(f"Invalid floating point value '{value}'")
def binoutput(self, value, mask):
if mask:
return self._null_binoutput
value = _ensure_bigendian(value)
return value.tobytes()
def _filter_nan(self, value, mask):
return np.where(mask, np.nan, value)
def _filter_null(self, value, mask):
return np.where(mask, self.null, value)
class Double(FloatingPoint):
"""
Handles the double datatype. Double-precision IEEE
floating-point.
"""
format = 'f8'
class Float(FloatingPoint):
"""
Handles the float datatype. Single-precision IEEE floating-point.
"""
format = 'f4'
class Integer(Numeric):
"""
The base class for all the integral datatypes.
"""
default = 0
def __init__(self, field, config=None, pos=None):
Numeric.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mask = False
if isinstance(value, str):
value = value.lower()
if value == '':
if config['version_1_3_or_later']:
mask = True
else:
warn_or_raise(W49, W49, (), config, pos)
if self.null is not None:
value = self.null
else:
value = self.default
elif value == 'nan':
mask = True
if self.null is None:
warn_or_raise(W31, W31, (), config, pos)
value = self.default
else:
value = self.null
elif value.startswith('0x'):
value = int(value[2:], 16)
else:
value = int(value, 10)
else:
value = int(value)
if self.null is not None and value == self.null:
mask = True
if value < self.val_range[0]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[0]
elif value > self.val_range[1]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[1]
return value, mask
def output(self, value, mask):
if mask:
if self.null is None:
warn_or_raise(W31, W31)
return 'NaN'
return str(self.null)
return str(value)
def binoutput(self, value, mask):
if mask:
if self.null is None:
vo_raise(W31)
else:
value = self.null
value = _ensure_bigendian(value)
return value.tobytes()
def filter_array(self, value, mask):
if np.any(mask):
if self.null is not None:
return np.where(mask, self.null, value)
else:
vo_raise(W31)
return value
class UnsignedByte(Integer):
"""
Handles the unsignedByte datatype. Unsigned 8-bit integer.
"""
format = 'u1'
val_range = (0, 255)
bit_size = '8-bit unsigned'
class Short(Integer):
"""
Handles the short datatype. Signed 16-bit integer.
"""
format = 'i2'
val_range = (-32768, 32767)
bit_size = '16-bit'
class Int(Integer):
"""
Handles the int datatype. Signed 32-bit integer.
"""
format = 'i4'
val_range = (-2147483648, 2147483647)
bit_size = '32-bit'
class Long(Integer):
"""
Handles the long datatype. Signed 64-bit integer.
"""
format = 'i8'
val_range = (-9223372036854775808, 9223372036854775807)
bit_size = '64-bit'
class ComplexArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), True
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i:i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ComplexVarArray(VarArray):
"""
Handles a variable-length array of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), True
parts = self._splitter(value, config, pos)
parse_parts = self._base.parse_parts
result = []
result_mask = []
for i in range(0, len(parts), 2):
value = [float(x) for x in parts[i:i + 2]]
value, mask = parse_parts(value, config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(
np.array(result, dtype=self._base.format), result_mask), False
class ComplexArray(NumericArray):
"""
Handles a fixed-size array of complex numbers.
"""
vararray_type = ComplexArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._items *= 2
def parse(self, value, config=None, pos=None):
parts = self._splitter(value, config, pos)
if parts == ['']:
parts = []
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
if len(parts) != self._items:
vo_raise(E02, (self._items, len(parts)), config, pos)
base_parse = self._base.parse_parts
result = []
result_mask = []
for i in range(0, self._items, 2):
value = [float(x) for x in parts[i:i + 2]]
value, mask = base_parse(value, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(
result, dtype=self._base.format).reshape(self._arraysize)
result_mask = np.array(
result_mask, dtype='bool').reshape(self._arraysize)
return result, result_mask
class Complex(FloatingPoint, Array):
"""
The base class for complex numbers.
"""
array_type = ComplexArray
vararray_type = ComplexVarArray
default = np.nan
def __init__(self, field, config=None, pos=None):
FloatingPoint.__init__(self, field, config, pos)
Array.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
stripped = value.strip()
if stripped == '' or stripped.lower() == 'nan':
return np.nan, True
splitter = self._splitter
parts = [float(x) for x in splitter(value, config, pos)]
if len(parts) != 2:
vo_raise(E03, (value,), config, pos)
return self.parse_parts(parts, config, pos)
_parse_permissive = parse
_parse_pedantic = parse
def parse_parts(self, parts, config=None, pos=None):
value = complex(*parts)
return value, self.is_null(value)
def output(self, value, mask):
if mask:
if self.null is None:
return 'NaN'
else:
value = self.null
real = self._output_format.format(float(value.real))
imag = self._output_format.format(float(value.imag))
if self._output_format[2] == 'r':
if real.endswith('.0'):
real = real[:-2]
if imag.endswith('.0'):
imag = imag[:-2]
return real + ' ' + imag
class FloatComplex(Complex):
"""
    Handles the floatComplex datatype. Pair of single-precision IEEE
floating-point numbers.
"""
format = 'c8'
class DoubleComplex(Complex):
"""
    Handles the doubleComplex datatype. Pair of double-precision IEEE
floating-point numbers.
"""
format = 'c16'
class BitArray(NumericArray):
"""
Handles an array of bits.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._bytes = ((self._items - 1) // 8) + 1
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return list(re.sub(r'\s', '', value))
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if ',' in value:
vo_warn(W01, (), config, pos)
return list(re.sub(r'\s|,', '', value))
def output(self, value, mask):
if np.any(mask):
vo_warn(W39)
value = np.asarray(value)
mapping = {False: '0', True: '1'}
return ''.join(mapping[x] for x in value.flat)
def binparse(self, read):
data = read(self._bytes)
result = bitarray_to_bool(data, self._items)
result = result.reshape(self._arraysize)
result_mask = np.zeros(self._arraysize, dtype='b1')
return result, result_mask
def binoutput(self, value, mask):
if np.any(mask):
vo_warn(W39)
return bool_to_bitarray(value)
class Bit(Converter):
"""
Handles the bit datatype.
"""
format = 'b1'
array_type = BitArray
vararray_type = ScalarVarArray
default = False
binary_one = b'\x08'
binary_zero = b'\0'
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mapping = {'1': True, '0': False}
if value is False or value.strip() == '':
if not config['version_1_3_or_later']:
warn_or_raise(W49, W49, (), config, pos)
return False, True
else:
try:
return mapping[value], False
except KeyError:
vo_raise(E04, (value,), config, pos)
def output(self, value, mask):
if mask:
vo_warn(W39)
if value:
return '1'
else:
return '0'
def binparse(self, read):
data = read(1)
return (ord(data) & 0x8) != 0, False
def binoutput(self, value, mask):
if mask:
vo_warn(W39)
if value:
return self.binary_one
return self.binary_zero
class BooleanArray(NumericArray):
"""
Handles an array of boolean values.
"""
vararray_type = ArrayVarArray
def binparse(self, read):
data = read(self._items)
binparse = self._base.binparse_value
result = []
result_mask = []
for char in data:
value, mask = binparse(char)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype='b1').reshape(
self._arraysize)
result_mask = np.array(result_mask, dtype='b1').reshape(
self._arraysize)
return result, result_mask
def binoutput(self, value, mask):
binoutput = self._base.binoutput
value = np.asarray(value)
mask = np.asarray(mask)
result = [binoutput(x, m)
for x, m in np.broadcast(value.flat, mask.flat)]
return _empty_bytes.join(result)
class Boolean(Converter):
"""
Handles the boolean datatype.
"""
format = 'b1'
array_type = BooleanArray
vararray_type = ScalarVarArray
default = False
binary_question_mark = b'?'
binary_true = b'T'
binary_false = b'F'
def parse(self, value, config=None, pos=None):
if value == '':
return False, True
if value is False:
return False, True
mapping = {'TRUE': (True, False),
'FALSE': (False, False),
'1': (True, False),
'0': (False, False),
'T': (True, False),
'F': (False, False),
'\0': (False, True),
' ': (False, True),
'?': (False, True),
'': (False, True)}
try:
return mapping[value.upper()]
except KeyError:
vo_raise(E05, (value,), config, pos)
def output(self, value, mask):
if mask:
return '?'
if value:
return 'T'
return 'F'
def binparse(self, read):
value = ord(read(1))
return self.binparse_value(value)
_binparse_mapping = {
ord('T'): (True, False),
ord('t'): (True, False),
ord('1'): (True, False),
ord('F'): (False, False),
ord('f'): (False, False),
ord('0'): (False, False),
ord('\0'): (False, True),
ord(' '): (False, True),
ord('?'): (False, True)}
def binparse_value(self, value):
try:
return self._binparse_mapping[value]
except KeyError:
vo_raise(E05, (value,))
def binoutput(self, value, mask):
if mask:
return self.binary_question_mark
if value:
return self.binary_true
return self.binary_false
converter_mapping = {
'double': Double,
'float': Float,
'bit': Bit,
'boolean': Boolean,
'unsignedByte': UnsignedByte,
'short': Short,
'int': Int,
'long': Long,
'floatComplex': FloatComplex,
'doubleComplex': DoubleComplex,
'char': Char,
'unicodeChar': UnicodeChar}
def get_converter(field, config=None, pos=None):
"""
Get an appropriate converter instance for a given field.
Parameters
----------
field : astropy.io.votable.tree.Field
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter
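    Examples
    --------
    A minimal sketch (a ``Field`` is constructed here without a parent
    ``VOTableFile``, as the test suite does; details may differ between
    versions)::

        from astropy.io.votable import tree
        from astropy.io.votable.converters import get_converter

        field = tree.Field(None, name='flux', datatype='double')
        conv = get_converter(field)
        value, mask = conv.parse('1.5')    # -> (1.5, False)
        text = conv.output(value, mask)    # back to TABLEDATA text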
"""
if config is None:
config = {}
if field.datatype not in converter_mapping:
vo_raise(E06, (field.datatype, field.ID), config)
cls = converter_mapping[field.datatype]
converter = cls(field, config, pos)
arraysize = field.arraysize
# With numeric datatypes, special things need to happen for
# arrays.
if (field.datatype not in ('char', 'unicodeChar') and
arraysize is not None):
if arraysize[-1] == '*':
arraysize = arraysize[:-1]
last_x = arraysize.rfind('x')
if last_x == -1:
arraysize = ''
else:
arraysize = arraysize[:last_x]
fixed = False
else:
fixed = True
if arraysize != '':
arraysize = [int(x) for x in arraysize.split("x")]
arraysize.reverse()
else:
arraysize = []
if arraysize != []:
converter = converter.array_type(
field, converter, arraysize, config)
if not fixed:
converter = converter.vararray_type(
field, converter, arraysize, config)
return converter
numpy_dtype_to_field_mapping = {
np.float64().dtype.num: 'double',
np.float32().dtype.num: 'float',
np.bool_().dtype.num: 'bit',
np.uint8().dtype.num: 'unsignedByte',
np.int16().dtype.num: 'short',
np.int32().dtype.num: 'int',
np.int64().dtype.num: 'long',
np.complex64().dtype.num: 'floatComplex',
np.complex128().dtype.num: 'doubleComplex',
np.unicode_().dtype.num: 'unicodeChar'
}
numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char'
def _all_matching_dtype(column):
first_dtype = False
first_shape = ()
for x in column:
if not isinstance(x, np.ndarray) or len(x) == 0:
continue
if first_dtype is False:
first_dtype = x.dtype
first_shape = x.shape[1:]
elif first_dtype != x.dtype:
return False, ()
elif first_shape != x.shape[1:]:
first_shape = ()
return first_dtype, first_shape
def numpy_to_votable_dtype(dtype, shape):
"""
    Converts a numpy dtype and shape to a dictionary of attributes for
    a VOTable FIELD element that corresponds to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
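    Examples
    --------
    A couple of illustrative conversions::

        import numpy as np

        numpy_to_votable_dtype(np.dtype('float64'), ())
        # -> {'datatype': 'double'}
        numpy_to_votable_dtype(np.dtype('int32'), (2, 3))
        # -> {'datatype': 'int', 'arraysize': '2x3'}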
"""
if dtype.num not in numpy_dtype_to_field_mapping:
raise TypeError(
f"{dtype!r} can not be represented in VOTable")
if dtype.char == 'S':
return {'datatype': 'char',
'arraysize': str(dtype.itemsize)}
elif dtype.char == 'U':
return {'datatype': 'unicodeChar',
'arraysize': str(dtype.itemsize // 4)}
else:
result = {
'datatype': numpy_dtype_to_field_mapping[dtype.num]}
if len(shape):
result['arraysize'] = 'x'.join(str(x) for x in shape)
return result
def table_column_to_votable_datatype(column):
"""
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
This necessarily must perform some heuristics to determine the
    type of variable-length array fields, since they are not directly
supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
- If all elements are numpy arrays of the same dtype and with a
consistent shape in all but the first dimension, it creates a
variable length array of fixed sized arrays. If the dtypes
match, but the shapes do not, a variable length array is
created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
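    Examples
    --------
    A minimal illustrative sketch::

        from astropy.table import Column

        table_column_to_votable_datatype(Column([1.0, 2.0]))
        # -> {'datatype': 'double'}
        table_column_to_votable_datatype(Column(['a', 'bc']))
        # -> {'datatype': 'unicodeChar', 'arraysize': '2'}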
"""
votable_string_dtype = None
if column.info.meta is not None:
votable_string_dtype = column.info.meta.get('_votable_string_dtype')
if column.dtype.char == 'O':
if votable_string_dtype is not None:
return {'datatype': votable_string_dtype, 'arraysize': '*'}
elif isinstance(column[0], np.ndarray):
dtype, shape = _all_matching_dtype(column)
if dtype is not False:
result = numpy_to_votable_dtype(dtype, shape)
if 'arraysize' not in result:
result['arraysize'] = '*'
else:
result['arraysize'] += '*'
return result
# All bets are off, do the most generic thing
return {'datatype': 'unicodeChar', 'arraysize': '*'}
# For fixed size string columns, datatype here will be unicodeChar,
# but honor the original FIELD datatype if present.
result = numpy_to_votable_dtype(column.dtype, column.shape[1:])
if result['datatype'] == 'unicodeChar' and votable_string_dtype == 'char':
result['datatype'] = 'char'
return result
|
7414432d8ce84775019d8140893e4ad0cb6119869bfa788b8211102a0672543c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from . import parse, from_table
from .tree import VOTableFile, Table as VOTable
from astropy.io import registry as io_registry
from astropy.table import Table
from astropy.table.column import BaseColumn
from astropy.units import Quantity
from astropy.utils.misc import NOT_OVERWRITING_MSG
def is_votable(origin, filepath, fileobj, *args, **kwargs):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
    origin : str
        The registry operation being performed; only ``'read'`` is
        handled by this identifier.
    filepath : str or None
        Path to the candidate VOTABLE_ xml file, if available.
    fileobj : readable file-like or None
        File object containing the candidate VOTABLE_ xml file, if
        available.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
from . import is_votable
if origin == 'read':
if fileobj is not None:
try:
result = is_votable(fileobj)
finally:
fileobj.seek(0)
return result
elif filepath is not None:
return is_votable(filepath)
elif isinstance(args[0], (VOTableFile, VOTable)):
return True
else:
return False
else:
return False
def read_table_votable(input, table_id=None, use_names_over_ids=False,
verify=None, **kwargs):
"""
    Read a Table object from a VO table file.
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.Table`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.Table` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
**kwargs
Additional keyword arguments are passed on to
:func:`astropy.io.votable.table.parse`.
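    Examples
    --------
    This reader is normally used through the unified I/O interface; the
    file name below is hypothetical::

        from astropy.table import Table

        t = Table.read('catalog.xml', format='votable', table_id=0)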
"""
if not isinstance(input, (VOTableFile, VOTable)):
input = parse(input, table_id=table_id, verify=verify, **kwargs)
# Parse all table objects
table_id_mapping = dict()
tables = []
if isinstance(input, VOTableFile):
for table in input.iter_tables():
if table.ID is not None:
table_id_mapping[table.ID] = table
tables.append(table)
if len(tables) > 1:
if table_id is None:
raise ValueError(
"Multiple tables found: table id should be set via "
"the table_id= argument. The available tables are {}, "
'or integers less than {}.'.format(
', '.join(table_id_mapping.keys()), len(tables)))
elif isinstance(table_id, str):
if table_id in table_id_mapping:
table = table_id_mapping[table_id]
else:
raise ValueError(
f"No tables with id={table_id} found")
elif isinstance(table_id, int):
if table_id < len(tables):
table = tables[table_id]
else:
raise IndexError(
"Table index {} is out of range. "
"{} tables found".format(
table_id, len(tables)))
elif len(tables) == 1:
table = tables[0]
else:
raise ValueError("No table found")
elif isinstance(input, VOTable):
table = input
# Convert to an astropy.table.Table object
return table.to_table(use_names_over_ids=use_names_over_ids)
def write_table_votable(input, output, table_id=None, overwrite=False,
tabledata_format=None):
"""
    Write a Table object to a VO table file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`astropy:votable-serialization`.
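    Examples
    --------
    This writer is normally used through the unified I/O interface; the
    output file name below is hypothetical::

        from astropy.table import Table

        t = Table({'a': [1, 2, 3]})
        t.write('out.xml', format='votable', tabledata_format='binary2',
                overwrite=True)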
"""
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError('cannot write table with mixin column(s) {} to VOTable'
.format(unsupported_names))
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Create a new VOTable file
table_file = from_table(input, table_id=table_id)
# Write out file
table_file.to_xml(output, tabledata_format=tabledata_format)
io_registry.register_reader('votable', Table, read_table_votable)
io_registry.register_writer('votable', Table, write_table_votable)
io_registry.register_identifier('votable', Table, is_votable)
|
c0f407b14507566fa2c3d96935240deca7f8d2dabf9d960eee21ec79c1eaea2a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import io
import re
import gzip
import base64
import codecs
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy.io import fits
from astropy import __version__ as astropy_version
from astropy.utils.collections import HomogeneousList
from astropy.utils.xml.writer import XMLWriter
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import converters
from .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,
warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,
W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,
W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,
W44, W45, W50, W52, W53, W54, E06, E08, E09, E10, E11,
E12, E13, E15, E16, E17, E18, E19, E20, E21, E22, E23,
E25)
from . import ucd as ucd_mod
from . import util
from . import xmlutil
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys', 'TimeSys',
'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',
'VOTableFile', 'Element'
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
    Masked arrays cannot be resized in place, and `np.resize` and
    `ma.resize` are both incompatible with structured arrays.
    Therefore, we resize manually here.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
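    Examples
    --------
    A sketch of how this factory is used further down in this module
    (the exact arguments are illustrative)::

        get_field_by_id = _lookup_by_attr_factory(
            'ID', True, 'iter_fields_and_params', 'FIELD',
            "Looks up a FIELD element by the given ID.")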
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config, element._pos, KeyError)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name))
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
        Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config, element._pos, KeyError)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name))
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config['version_1_4_or_later']:
return 'vounit'
else:
return 'cds'
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get('unit_format') is None:
format = _get_default_unit_format(config)
else:
format = config['unit_format']
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
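    Examples
    --------
    Illustrative values (the behaviour for invalid years depends on the
    ``verify`` setting in *config*)::

        check_astroyear('J2000.0', 'equinox')   # -> True
        check_astroyear('2000 AD', 'equinox')   # warns (W07), returns False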
"""
if (year is not None and
re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None):
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
    string : str
        The string value to check
    attr_name : str
        The name of the attribute the string was found in (used for
        error message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
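    Examples
    --------
    An illustrative check (the behaviour for invalid UCDs depends on the
    ``verify`` setting in *config*)::

        check_ucd('phot.mag;em.opt.V',
                  config={'version_1_1_or_later': True})   # -> True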
"""
if config is None:
config = {}
if config.get('version_1_1_or_later'):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get(
'version_1_2_or_later', False),
has_colon=config.get('version_1_2_or_later', False))
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get('verify', 'ignore') == 'exception':
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get('verify', 'ignore') == 'warn':
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(
W28, W28, ('xtype', self._element_name, '1.2'),
self._config, self._pos)
check_string(xtype, 'xtype', self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (self._utype_in_v1_2 and
utype is not None and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('utype', self._element_name, '1.2'),
self._config, self._pos)
check_string(utype, 'utype', self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == '':
ucd = None
if ucd is not None:
if (self._ucd_in_v1_2 and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('ucd', self._element_name, '1.2'),
self._config, self._pos)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ''
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get('version_1_1_or_later'):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name,
attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
    A base class for simple elements, such as FIELD, PARAM and INFO,
    that don't require any special parsing or outputting machinery but
    do carry textual content.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, self._content,
attrib=w.object_attrs(self, self._attr_list))
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',
'href', 'action']
_element_name = 'LINK'
def __init__(self, ID=None, title=None, value=None, href=None, action=None,
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get('content-role') or kwargs.get('content_role')
content_type = kwargs.get('content-type') or kwargs.get('content_type')
if 'gref' in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
'LINK', kwargs.keys(), config, pos,
['content-role', 'content_role', 'content-type', 'content_type',
'gref'])
@property
def content_role(self):
"""
Defines the MIME role of the referenced object. Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if ((content_role == 'type' and
not self._config['version_1_3_or_later']) or
content_role not in
(None, 'query', 'hints', 'doc', 'location')):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault('links', [])
column.meta['links'].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,
_UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = 'INFO'
_attr_list_11 = ['ID', 'name', 'value']
_attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']
_utype_in_v1_2 = True
def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,
ref=None, unit=None, ucd=None, utype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs('INFO', ['xtype'], config, pos)
if ref is not None:
warn_unknown_attrs('INFO', ['ref'], config, pos)
if unit is not None:
warn_unknown_attrs('INFO', ['unit'], config, pos)
if ucd is not None:
warn_unknown_attrs('INFO', ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs('INFO', ['utype'], config, pos)
warn_unknown_attrs('INFO', extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, ('name'), self._config, self._pos)
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, ('value'), self._config, self._pos)
check_string(value, 'value', self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),
self._config, self._pos)
xmlutil.check_id(ref, 'ref', self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),
self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
w.element(self._element_name, self._content,
attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, field, ID=None, null=None, ref=None,
type="legal", id=None, config=None, pos=None, **extras):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs('VALUES', extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
'0', self._config, self._pos)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""
[*required*] Defines the applicability of the domain defined
by this VALUES_ element. Must be one of the following
strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('legal', 'actual'):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,
self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, 'converter') and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == 'yes':
self._min_inclusive = True
elif inclusive == 'no':
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, 'converter') and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == 'yes':
self._max_inclusive = True
elif inclusive == 'no':
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != 'VALUES':
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == 'MIN':
if 'value' not in data:
vo_raise(E09, 'MIN', config, pos)
self.min = data['value']
self.min_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MIN', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'MAX':
if 'value' not in data:
vo_raise(E09, 'MAX', config, pos)
self.max = data['value']
self.max_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MAX', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'OPTION':
if 'value' not in data:
vo_raise(E09, 'OPTION', config, pos)
xmlutil.check_token(
data.get('name'), 'name', config, pos)
self.options.append(
(data.get('name'), data.get('value')))
warn_unknown_attrs(
'OPTION', data.keys(), config, pos,
['value', 'name'])
elif tag == 'VALUES':
break
return self
def is_defaults(self):
"""
        Are the settings on this ``VALUES`` element all the same as the
XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (self.ref is None and self.null is None and self.ID is None and
self.max is None and self.min is None and self.options == [])
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return 'yes'
return 'no'
if self.is_defaults():
return
if self.ref is not None:
w.element('VALUES', attrib=w.object_attrs(self, ['ref']))
else:
with w.tag('VALUES',
attrib=w.object_attrs(
self, ['ID', 'null', 'ref'])):
if self.min is not None:
w.element(
'MIN',
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive))
if self.max is not None:
w.element(
'MAX',
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive))
for name, value in self.options:
w.element(
'OPTION',
name=name,
value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ['ID', 'null']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta['min'] = {
'value': self.min,
'inclusive': self.min_inclusive}
if self.max is not None:
meta['max'] = {
'value': self.max,
'inclusive': self.max_inclusive}
if len(self.options):
meta['options'] = dict(self.options)
column.meta['values'] = meta
def from_table_column(self, column):
if column.info.meta is None or 'values' not in column.info.meta:
return
meta = column.info.meta['values']
for key in ['ID', 'null']:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if 'min' in meta:
self.min = meta['min']['value']
self.min_inclusive = meta['min']['inclusive']
if 'max' in meta:
self.max = meta['max']['value']
self.max_inclusive = meta['max']['inclusive']
if 'options' in meta:
self._options = list(meta['options'].items())
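# Illustrative sketch (not part of the module): building a VALUES domain for
# an existing Field.  ``votable`` and ``field`` below are assumed to come from
# an already-parsed VOTable, so the field's converter is set up; MIN and MAX
# are inclusive by default.
#
#     values = Values(votable, field, type='legal')
#     values.min, values.min_inclusive = 0, True
#     values.max, values.max_inclusive = 100, False
#     values.null = '-999'     # sentinel for missing integer values
#     print(values)            # serializes back to a <VALUES> element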
class Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,
_UtypeProperty, _UcdProperty):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',
'unit', 'width', 'precision', 'utype', 'ref']
_attr_list_12 = _attr_list_11 + ['xtype']
_element_name = 'FIELD'
def __init__(self, votable, ID=None, name=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, ref=None, type=None, id=None,
xtype=None,
config=None, pos=None, **extra):
if config is None:
if hasattr(votable, '_get_version_checks'):
config = votable._get_version_checks()
else:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ['xtype'], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and
ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and
datatype == 'double'):
datatype = 'char'
arraysize = '3'
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
if name is None:
if (self._element_name == 'PARAM' and
not config.get('version_1_1_or_later')):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
'string': 'char',
'unicodeString': 'unicodeChar',
'int16': 'short',
'int32': 'int',
'int64': 'long',
'float32': 'float',
'float64': 'double',
# The following appear in some Vizier tables
'unsignedInt': 'long',
'unsignedShort': 'int'
}
datatype_mapping.update(config.get('datatype_mapping', {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),
config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if (not implicit and
new_name != field.name):
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
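    # Illustrative sketch: given two fields that were both parsed with
    # ID='flux', ``uniqify_names`` keeps the first ID and renames the second
    # to 'flux_2' (emitting W32); colliding names get a ' 2' style suffix.
    # Assuming ``f1`` and ``f2`` are such Field instances:
    #
    #     Field.uniqify_names([f1, f2])
    #     # f1.ID == 'flux', f2.ID == 'flux_2'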
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""
[*required*] The datatype of the column. Valid values (as
defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get('version_1_1_or_later'):
warn_or_raise(E10, E10, self._element_name, self._config,
self._pos)
datatype = 'char'
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
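    # Illustrative sketch: the setter validates datatype strings against the
    # converter mapping, so a typo is caught.  Assuming ``field`` is a Field
    # from a parsed table:
    #
    #     field.datatype = 'double'    # accepted
    #     field.datatype = 'flaot64'   # raises/warns E06 (unknown datatype)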
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if (arraysize is not None and
not re.match(r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize)):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
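    # Illustrative examples of ``arraysize`` values accepted by the setter's
    # pattern: '3' (fixed-length vector), '10x20' (2-D block), and '64*' or
    # '10x*' (variable-length last dimension).  A malformed value such as
    # '*3' raises/warns E13.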
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == 'VALUES':
self.values.__init__(
self._votable, self, config=config, pos=pos, **data)
self.values.parse(iterator, config)
elif tag == 'LINK':
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == 'DESCRIPTION':
warn_unknown_attrs(
'DESCRIPTION', data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(
W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in
self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element('DESCRIPTION', self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (isinstance(self.converter, converters.FloatingPoint) and
self.converter.output_format != '{!r:>}'):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta['_votable_string_dtype'] = 'char'
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta['_votable_string_dtype'] = 'unicodeChar'
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs['unit'] = column.info.unit
kwargs['name'] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and 'links' in meta:
for link in meta['links']:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ['value']
_attr_list_12 = Field._attr_list_12 + ['value']
_element_name = 'PARAM'
def __init__(self, votable, ID=None, name=None, value=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, type=None, id=None, config=None,
pos=None, **extra):
self._value = value
Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,
arraysize=arraysize, ucd=ucd, unit=unit,
precision=precision, utype=utype, type=type,
id=id, config=config, pos=pos, **extra)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(
value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
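# Illustrative sketch: a PARAM is a constant-valued column whose string value
# is parsed through the field's converter, so the stored value is typed.
# ``votable`` below is assumed to be an existing VOTableFile instance:
#
#     p = Param(votable, ID='exptime', name='exptime',
#               datatype='float', unit='s', value='1200.0')
#     p.value    # -> 1200.0 (a float, not the original string)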
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'equinox', 'epoch', 'system']
_element_name = 'COOSYS'
def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
        # COOSYS was deprecated in 1.2 but then reinstated in 1.3
if (config.get('version_1_2_or_later') and
not config.get('version_1_3_or_later')):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs('COOSYS', extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get('version_1_1_or_later'):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def system(self):
"""
Specifies the type of coordinate system. Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',
'galactic', 'supergalactic', 'xy', 'barycentric',
'geo_app'):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
        (e.g. "J2000" for the default "eq_FK5" or "B1950" for the
        default "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, 'equinox', self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, 'epoch', self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
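# Illustrative sketch: a COOSYS ties positional columns to a coordinate
# frame.  An ID is required from VOTable 1.1 on (see the ID setter above):
#
#     coosys = CooSys(ID='J2000', system='eq_FK5',
#                     equinox='J2000', epoch='J2015.5')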
class TimeSys(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'timeorigin', 'timescale', 'refposition']
_element_name = 'TIMESYS'
def __init__(self, ID=None, timeorigin=None, timescale=None, refposition=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config['version_1_4_or_later']:
warn_or_raise(
W54, W54, config['version'], config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs('TIMESYS', extra.keys(), config, pos,
['ID', 'timeorigin', 'timescale', 'refposition'])
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
        given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (timeorigin is not None and
timeorigin != 'MJD-origin' and timeorigin != 'JD-origin'):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
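# Illustrative sketch: a TIMESYS (VOTable 1.4 and later) records the scale,
# origin and reference position of time-valued columns.  The config dict
# below is a minimal assumption so the version check passes:
#
#     ts = TimeSys(ID='time_frame', timescale='TT',
#                  timeorigin='MJD-origin', refposition='BARYCENTER',
#                  config={'version_1_4_or_later': True})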
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,
**extra):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Lookup the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(
f"No field named '{self.ref}'",
self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
        Lookup the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
        vo_raise(
            f"No param named '{self.ref}'",
            self._config, self._pos, KeyError)
class Group(Element, _IDProperty, _NameProperty, _UtypeProperty,
_UcdProperty, _DescriptionProperty):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
    This information is currently ignored by the vo package (that is,
    the columns in the recarray are always flat), but the grouping
    information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, table, ID=None, name=None, ref=None, ucd=None,
utype=None, id=None, config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList(
(FieldRef, ParamRef, Group, Param))
warn_unknown_attrs('GROUP', extra.keys(), config, pos)
def __repr__(self):
return f'<GROUP>... {len(self._entries)} entries ...</GROUP>'
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
'FIELDref': self._add_fieldref,
'PARAMref': self._add_paramref,
'PARAM': self._add_param,
'GROUP': self._add_group,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'GROUP', config, pos)
self.description = data or None
elif tag == 'GROUP':
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
'GROUP',
attrib=w.object_attrs(
self, ['ID', 'name', 'ref', 'ucd', 'utype'])):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
for field in entry.iter_fields_and_params():
yield field
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
for group in entry.iter_groups():
yield group
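# Illustrative sketch: GROUPs are kept only for round-tripping.  Assuming a
# parsed ``table`` that has a field whose ID is 'ra', a group referencing it
# could be built like this:
#
#     group = Group(table, ID='pos', name='pos')
#     group.entries.append(FieldRef(table, 'ra'))
#     table.groups.append(group)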
class Table(Element, _IDProperty, _NameProperty, _UcdProperty,
_DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
    If the Table contains no data (for example, its enclosing
    :class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
    will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,
utype=None, nrows=None, id=None, config=None, pos=None,
**extra):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
xmlutil.check_id(ref, 'ref', config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = 'tabledata'
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs('TABLE', extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(
W43, W43, ('TABLE', self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""
[*required*] The serialization format of the table. Must be
one of:
        'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_),
        or 'fits' (FITS_).
        Note that the 'fits' format, since it requires an external
        file, cannot be written out. Any file read in with 'fits'
format will be read out, by default, in 'tabledata' format.
See :ref:`astropy:votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == 'fits':
vo_raise("fits format can not be written out, only read.",
self._config, self._pos, NotImplementedError)
if format == 'binary2':
if not self._config['version_1_3_or_later']:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config, self._pos)
elif format not in ('tabledata', 'binary'):
vo_raise(f"Invalid format '{format}'",
self._config, self._pos)
self._format = format
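    # Illustrative sketch: the serialization format can be switched before
    # writing.  Assuming ``table`` was parsed from a VOTable 1.3+ document:
    #
    #     table.format = 'binary2'   # allowed for VOTable 1.3 or later
    #     table.format = 'fits'      # raises NotImplementedError (read-only)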
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype='O')
mask = np.zeros((nrows,), dtype='b')
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
new_type = (d[1][1] == 'O' and 'O') or 'bool'
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
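    # Illustrative growth sequence for the strategy above (with
    # RESIZE_AMOUNT == 1.5, per the comment): 0 -> 512 -> 768 -> 1152 ->
    # 1728 -> ..., so repeated row appends stay amortized without doubling
    # memory at each step.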
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get('columns')
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get('table_number')
current_table_number = config.get('_current_table_number')
skip_table = False
if current_table_number is not None:
config['_current_table_number'] += 1
if (table_number is not None and
table_number != current_table_number):
skip_table = True
self._empty = True
table_id = config.get('table_id')
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
else:
if tag == 'TABLE':
return self
elif tag == 'DESCRIPTION':
if self.description is not None:
                            warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
else:
tag_mapping = {
'FIELD': self._add_field,
'PARAM': self._add_param,
'GROUP': self._add_group,
'LINK': self._add_link,
'INFO': self._add_info,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
if len(self.fields) == 0:
warn_or_raise(E25, E25, None, config, pos)
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
                            warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
elif tag == 'TABLE':
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
                if np.any(columns < 0) or np.any(columns >= len(fields)):
raise ValueError(
"Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(
f"Columns '{columns}' not found in fields list")
else:
raise TypeError("Invalid columns list")
if (not skip_table) and (len(fields) > 0):
for start, tag, data, pos in iterator:
if start:
if tag == 'TABLEDATA':
warn_unknown_attrs(
'TABLEDATA', data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config)
break
elif tag == 'BINARY':
warn_unknown_attrs(
'BINARY', data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos)
break
elif tag == 'BINARY2':
if not config['version_1_3_or_later']:
warn_or_raise(
W52, W52, config['version'], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos)
break
elif tag == 'FITS':
warn_unknown_attrs(
'FITS', data.keys(), config, pos, ['extnum'])
try:
extnum = int(data.get('extnum', 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(
iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == 'DATA':
break
for start, tag, data, pos in iterator:
if start and tag == 'INFO':
if not config.get('version_1_2_or_later'):
warn_or_raise(
W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == 'TABLE':
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get('invalid', 'exception')
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == 'TR':
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = (data.get('encoding', None) == 'base64')
warn_unknown_attrs(
tag, data.keys(), config, pos, ['encoding'])
else:
if tag == 'TD':
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(
data.encode('ascii'))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](
buf.read)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
else:
try:
value, mask_value = parsers[i](
data, config, pos)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
except Exception as e:
if invalid == 'exception':
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == 'TR':
break
else:
self._add_unknown_tag(
iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows:numrows + chunk_size] = array_chunk
array.mask[numrows:numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == 'TABLEDATA':
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if (self.nrows is not None and
self.nrows >= 0 and
self.nrows != numrows):
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
if 'href' not in data:
have_local_stream = True
if data.get('encoding', None) != 'base64':
warn_or_raise(
W38, W38, data.get('encoding', None),
config, pos)
else:
href = data['href']
xmlutil.check_anyuri(href, config, pos)
encoding = data.get('encoding', None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode('ascii'))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, " +
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'rb', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config, self._pos, NotImplementedError)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(converters.bitarray_to_bool(
mask_bits, len(fields)))
# Ignore the mask for string columns (see issue 8995)
for i, f in enumerate(fields):
if row_mask_data[i] and (f.datatype == 'char' or f.datatype == 'unicodeChar'):
row_mask_data[i] = False
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e, config, pos, "(in row {:d}, col '{}')".format(
numrows, fields[i].ID))
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
href = data['href']
encoding = data.get('encoding', None)
else:
break
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, "
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'r', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config, self._pos, NotImplementedError)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get('tabledata_format')
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == 'fits':
format = 'tabledata'
with w.tag(
'TABLE',
attrib=w.object_attrs(
self,
('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups,
self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs['version_1_2_or_later']:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID=f"_g{index}")
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag('DATA'):
if format == 'tabledata':
self._write_tabledata(w, **kwargs)
elif format == 'binary':
self._write_binary(1, w, **kwargs)
elif format == 'binary2':
self._write_binary(2, w, **kwargs)
if kwargs['version_1_2_or_later']:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag('TABLEDATA'):
w._flush()
if (_has_c_tabledata_writer and
not kwargs.get('_debug_python_based_parser')):
supports_empty_values = [
field.converter.supports_empty_values(kwargs)
for field in fields]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write, array.data, array.mask, fields,
supports_empty_values, indent, 1 << 8)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [(i, field.converter.output,
field.converter.supports_empty_values(kwargs))
for i, field in enumerate(fields)]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID))
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = 'BINARY'
else:
tag_name = 'BINARY2'
with w.tag(tag_name):
with w.tag('STREAM', encoding='base64'):
fields_basic = [(i, field.converter.binoutput)
for (i, field) in enumerate(fields)]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
assert type(chunk) == bytes
except Exception as e:
vo_reraise(
e, additional=f"(in row {row:d}, col '{fields[i].ID}')")
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode('ascii'))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
"""
from astropy.table import Table
meta = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
for i, name in enumerate(names):
new_name = name
i = 2
while new_name in unique_names:
new_name = f'{name}{i}'
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
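    # Illustrative usage sketch, assuming ``votable`` was read with
    # astropy.io.votable.parse and contains at least one table:
    #
    #     vo_table = votable.get_first_table()
    #     astropy_table = vo_table.to_table(use_names_over_ids=True)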
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
"""
kwargs = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype']:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if 'description' in table.meta:
new_table.description = table.meta['description']
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table),
mask=np.asarray(table.mask))
return new_table
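    # Illustrative round-trip sketch, assuming ``astropy_table`` is an
    # astropy.table.Table and ``votable`` is the VOTableFile that will own
    # the result:
    #
    #     vo_table = Table.from_table(votable, astropy_table)
    #     votable.resources[0].tables.append(vo_table)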
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
for param in self.params:
yield param
for field in self.fields:
yield field
for group in self.groups:
for field in group.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID or name.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
for g in group.iter_groups():
yield g
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""")
def iter_info(self):
for info in self.infos:
yield info
class Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,
_DescriptionProperty):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, name=None, ID=None, utype=None, type='results',
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(
self._element_name,
attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""
[*required*] The type of the resource. Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('results', 'meta'):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""
A dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. (The specification explicitly allows
for extra attributes here, but nowhere else.)
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system definitions (TIMESYS_ elements) for
the RESOURCE_. Must contain only `TimeSys` objects.
"""
return self._time_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
        A list of groups.  Must contain only `Group` objects.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
'TABLE': self._add_table,
'INFO': self._add_info,
'PARAM': self._add_param,
'GROUP': self._add_group,
'COOSYS': self._add_coosys,
'TIMESYS': self._add_timesys,
'RESOURCE': self._add_resource,
'LINK': self._add_link,
'DESCRIPTION': self._ignore_add
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'RESOURCE', config, pos)
self.description = data or None
elif tag == 'RESOURCE':
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ('ID', 'type', 'utype'))
attrs.update(self.extra_attributes)
with w.tag('RESOURCE', attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.coordinate_systems, self.time_systems,
self.params, self.infos, self.links,
self.tables, self.resources):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
for table in self.tables:
yield table
for resource in self.resources:
for table in resource.iter_tables():
yield table
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
for param in self.params:
yield param
for table in self.tables:
for param in table.iter_fields_and_params():
yield param
for resource in self.resources:
for param in resource.iter_fields_and_params():
yield param
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
def iter_timesys(self):
"""
Recursively iterates over all the TIMESYS_ elements in the
resource and nested resources.
"""
for timesys in self.time_systems:
yield timesys
for resource in self.resources:
for timesys in resource.iter_timesys():
yield timesys
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
for info in self.infos:
yield info
for table in self.tables:
for info in table.iter_info():
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
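# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): walking the element tree of an already-parsed file with the
# iteration helpers defined above. The path 'example.xml' is a placeholder.
def _example_walk_resource_tree():
    from astropy.io.votable import parse   # imported lazily to avoid a circular import
    votable = parse('example.xml')
    for resource in votable.resources:
        # iter_tables() also descends into nested RESOURCE elements.
        for table in resource.iter_tables():
            print(table.ID, len(table.array))
    # FIELD and PARAM elements can be collected in a single recursive pass.
    return [field.ID or field.name
            for field in votable.iter_fields_and_params()]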
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.4"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version == '1.0':
warnings.warn('VOTable 1.0 support is deprecated in astropy 4.3 and will be '
'removed in a future release', AstropyDeprecationWarning)
        elif version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(f"'version' should be in ('1.0', '{allowed_from_map}').")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return f'<VOTABLE>... {n_tables} tables ...</VOTABLE>'
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(
f"astropy.io.votable only supports VOTable versions '{allowed_from_map}'")
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system descriptions for the file. Must
contain only `TimeSys` objects.
"""
return self._time_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _get_version_checks(self):
config = {}
config['version_1_1_or_later'] = \
util.version_compare(self.version, '1.1') >= 0
config['version_1_2_or_later'] = \
util.version_compare(self.version, '1.2') >= 0
config['version_1_3_or_later'] = \
util.version_compare(self.version, '1.3') >= 0
config['version_1_4_or_later'] = \
util.version_compare(self.version, '1.4') >= 0
return config
# Map VOTable version numbers to namespace URIs and schema information.
_version_namespace_map = {
# Version 1.0 isn't well-supported, but is allowed on parse (with a warning).
# It used DTD rather than schema, so this information would not be useful.
# By omitting 1.0 from this dict we can use the keys as the list of versions
# that are allowed in various other checks.
"1.1": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.1",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.1"
},
"1.2": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.2",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.2"
},
# With 1.3 we'll be more explicit with the schema location.
# - xsi:schemaLocation uses the namespace name along with the URL
# to reference it.
# - For convenience, but somewhat confusingly, the namespace URIs
# are also usable URLs for accessing an applicable schema.
# However to avoid confusion, we'll use the explicit schema URL.
"1.3": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value":
"http://www.ivoa.net/xml/VOTable/v1.3 http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"
},
# With 1.4 namespace URIs stopped incrementing with minor version changes
# so we use the same URI as with 1.3. See this IVOA note for more info:
# http://www.ivoa.net/documents/Notes/XMLVers/20180529/
"1.4": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value":
"http://www.ivoa.net/xml/VOTable/v1.3 http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"
}
}
def parse(self, iterator, config):
config['_current_table_number'] = 0
for start, tag, data, pos in iterator:
if start:
if tag == 'xml':
pass
elif tag == 'VOTABLE':
if 'version' not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config['version'] = self.version
else:
config['version'] = self._version = data['version']
if config['version'].lower().startswith('v'):
warn_or_raise(
W29, W29, config['version'], config, pos)
self._version = config['version'] = \
config['version'][1:]
if config['version'] not in self._version_namespace_map:
vo_warn(W21, config['version'], config, pos)
if 'xmlns' in data:
ns_info = self._version_namespace_map.get(config['version'], {})
correct_ns = ns_info.get('namespace_uri')
if data['xmlns'] != correct_ns:
vo_warn(W41, (correct_ns, data['xmlns']), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config.update(self._get_version_checks())
tag_mapping = {
'PARAM': self._add_param,
'RESOURCE': self._add_resource,
'COOSYS': self._add_coosys,
'TIMESYS': self._add_timesys,
'INFO': self._add_info,
'DEFINITIONS': self._add_definitions,
'DESCRIPTION': self._ignore_add,
'GROUP': self._add_group}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'VOTABLE', config, pos)
self.description = data or None
if not len(self.resources) and config['version_1_2_or_later']:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(self, fd, compressed=False, tabledata_format=None,
_debug_python_based_parser=False, _astropy_version=None):
"""
Write to an XML file.
Parameters
----------
fd : str or file-like
Where to write the file. If a file-like object, must be writable.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
if tabledata_format is not None:
if tabledata_format.lower() not in (
'tabledata', 'binary', 'binary2'):
raise ValueError(f"Unknown format type '{format}'")
kwargs = {
'version': self.version,
'tabledata_format':
tabledata_format,
'_debug_python_based_parser': _debug_python_based_parser,
'_group_number': 1}
kwargs.update(self._get_version_checks())
with util.convert_to_writable_filelike(
fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
# Build the VOTABLE tag attributes.
votable_attr = {
'version': version,
'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"
}
ns_info = self._version_namespace_map.get(version, {})
namespace_uri = ns_info.get('namespace_uri')
if namespace_uri:
votable_attr['xmlns'] = namespace_uri
schema_location_attr = ns_info.get('schema_location_attr')
schema_location_value = ns_info.get('schema_location_value')
if schema_location_attr and schema_location_value:
votable_attr[schema_location_attr] = schema_location_value
with w.tag('VOTABLE', votable_attr):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [self.coordinate_systems, self.time_systems,
self.params, self.infos, self.resources]
if kwargs['version_1_2_or_later']:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
for table in resource.iter_tables():
yield table
def get_first_table(self):
"""
        Returns the first non-empty table in the file, which is often
        the only one you need. Raises `IndexError` if no such table exists.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""")
get_tables_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""")
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(
f"No table at index {idx:d} found in VOTABLE file.")
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
for field in resource.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_ or name.
""")
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_values', 'VALUES',
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
for group in table.iter_groups():
yield group
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""")
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
get_coosys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_coosys', 'COOSYS',
"""Looks up a COOSYS_ element by the given ID.""")
def iter_timesys(self):
"""
Recursively iterate over all TIMESYS_ elements in the VOTABLE_
file.
"""
for timesys in self.time_systems:
yield timesys
for resource in self.resources:
for timesys in resource.iter_timesys():
yield timesys
get_timesys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_timesys', 'TIMESYS',
"""Looks up a TIMESYS_ element by the given ID.""")
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
for info in self.infos:
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
get_info_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_info', 'INFO',
"""Looks up a INFO element by the given ID.""")
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
        table : `astropy.table.Table` instance
            The table to convert.
        table_id : str, optional
            Set the given ID attribute on the returned Table instance.
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
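# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): building a VOTableFile from an `astropy.table.Table` with
# the classmethod above and serializing it. The output file name
# 'new_votable.xml' is a placeholder.
def _example_from_table_roundtrip():
    from astropy.table import Table as AstropyTable   # alias avoids clashing with tree.Table
    data = AstropyTable({'ra': [10.0, 20.0], 'dec': [-5.0, 5.0]})
    votable_file = VOTableFile.from_table(data, table_id='positions')
    # Choose the serialization format of every TABLE before writing.
    for table in votable_file.iter_tables():
        table.format = 'binary'
    votable_file.to_xml('new_votable.xml')
    return votable_file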
|
d26fb1dbdfb4dd502407a1078fa8b1f805736e10f95779dc3ab4ea6f958b2f03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read and write a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
# LOCAL
from . import exceptions
from . import tree
from astropy.utils.xml import iterparser
from astropy.utils import data
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
'reset_vo_warnings']
VERIFY_OPTIONS = ['ignore', 'warn', 'exception']
@deprecated_renamed_argument('pedantic', 'verify', since='5.0')
def parse(source, columns=None, invalid='exception', verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
table_id=None, filename=None, unit_format=None,
datatype_mapping=None, _debug_python_based_parser=False):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
        The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
        `~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import conf
invalid = invalid.lower()
if invalid not in ('exception', 'mask'):
raise ValueError("accepted values of ``invalid`` are: "
"``'exception'`` or ``'mask'``.")
if verify is None:
conf_verify_lowercase = conf.verify.lower()
# We need to allow verify to be booleans as strings since the
# configuration framework doesn't make it easy/possible to have mixed
# types.
if conf_verify_lowercase in ['false', 'true']:
verify = conf_verify_lowercase == 'true'
else:
verify = conf_verify_lowercase
if isinstance(verify, bool):
verify = 'exception' if verify else 'warn'
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
'columns': columns,
'invalid': invalid,
'verify': verify,
'chunk_size': chunk_size,
'table_number': table_number,
'filename': filename,
'unit_format': unit_format,
'datatype_mapping': datatype_mapping
}
if filename is None and isinstance(source, str):
config['filename'] = source
with iterparser.get_xml_iterator(
source,
_debug_python_based_parser=_debug_python_based_parser) as iterator:
return tree.VOTableFile(
config=config, pos=(1, 1)).parse(iterator, config)
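# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): typical invocations of `parse`. The path 'catalog.xml' is a
# placeholder.
def _example_parse_usage():
    # Default behaviour: the verification level comes from the astropy config.
    votable = parse('catalog.xml')
    # Raise an exception on any violation of the VOTable specification.
    strict = parse('catalog.xml', verify='exception')
    # Read only the first TABLE element, by its ordinal position in the file.
    first_only = parse('catalog.xml', table_number=0)
    return votable, strict, first_only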
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
"""
if kwargs.get('table_number') is None:
kwargs['table_number'] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
        :ref:`astropy:votable-serialization`.
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance")
table.to_xml(file, tabledata_format=tabledata_format,
_debug_python_based_parser=True)
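# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): writing an `astropy.table.Table` out as a VOTable. The
# output file names are placeholders.
def _example_writeto_usage():
    from astropy.table import Table
    stars = Table({'name': ['Vega', 'Sirius'], 'vmag': [0.03, -1.46]})
    # A plain Table is converted to a VOTableFile automatically.
    writeto(stars, 'stars.xml')
    # The serialization of the table data can be overridden on output.
    writeto(stars, 'stars_binary.xml', tabledata_format='binary2')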
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
        Path to a VOTABLE_ xml file (as a string or `~pathlib.Path`
        object), or a readable file-like object.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
from astropy.utils.console import print_code_line, color_print
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
with data.get_readable_fileobj(source, encoding='binary') as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, 'name'):
filename = source.name
elif hasattr(source, 'url'):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify='warn', filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [str(x.message) for x in warning_lines if
issubclass(x.category, exceptions.VOWarning)] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w['is_something']:
output.write(w['message'])
output.write('\n\n')
else:
line = xml_lines[w['nline'] - 1]
warning = w['warning']
if w['is_warning']:
color = 'yellow'
else:
color = 'red'
color_print(
f"{w['nline']:d}: ", '',
warning or 'EXC', color,
': ', '',
textwrap.fill(
w['message'],
initial_indent=' ',
subsequent_indent=' ').lstrip(),
file=output)
print_code_line(line, w['nchar'], file=output)
output.write('\n')
else:
output.write('astropy.io.votable found no violations.\n\n')
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(
filename, version)
if success != 0:
output.write(
'xmllint schema violations:\n\n')
output.write(stderr.decode('utf-8'))
else:
output.write('xmllint passed\n')
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
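# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): the two return modes of `validate`. The path 'suspect.xml'
# is a placeholder.
def _example_validate_usage():
    # With the default ``output=sys.stdout`` a boolean pass/fail is returned.
    is_clean = validate('suspect.xml')
    # With ``output=None`` the full textual report is returned instead.
    report = validate('suspect.xml', output=None)
    return is_clean, report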
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != 'xml':
return False
break
for start, tag, d, pos in iterator:
if tag != 'VOTABLE':
return False
break
return True
except ValueError:
return False
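# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): a cheap format check before attempting a full parse. The
# default path 'unknown_file.xml' is a placeholder.
def _example_sniff_and_read(path='unknown_file.xml'):
    if not is_votable(path):
        return None
    # Convert the first table straight to an `astropy.table.Table`.
    return parse_single_table(path).to_table()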
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
try:
del module.__warningregistry__
except AttributeError:
pass
|
6f0eb5e8428456c3cec1eb1f695bf0b607705904d49ddd74414272eae3bf45b0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package reads and writes data formats used by the Virtual
Observatory (VO) initiative, particularly the VOTable XML format.
"""
from .table import (
parse, parse_single_table, validate, from_table, is_votable, writeto)
from .exceptions import (
VOWarning, VOTableChangeWarning, VOTableSpecWarning, UnimplementedWarning,
IOWarning, VOTableSpecError)
from astropy import config as _config
__all__ = [
'Conf', 'conf', 'parse', 'parse_single_table', 'validate',
'from_table', 'is_votable', 'writeto', 'VOWarning',
'VOTableChangeWarning', 'VOTableSpecWarning',
'UnimplementedWarning', 'IOWarning', 'VOTableSpecError']
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable`.
"""
verify = _config.ConfigItem(
'ignore',
"Can be 'exception' (treat fixable violations of the VOTable spec as "
"exceptions), 'warn' (show warnings for VOTable spec violations), or "
"'ignore' (silently ignore VOTable spec violations)",
aliases=['astropy.io.votable.table.pedantic',
'astropy.io.votable.pedantic'])
conf = Conf()
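# --- Illustrative usage sketch (editorial addition, not part of the original
# astropy module): the ``verify`` behaviour can be chosen per call or through
# the configuration item defined above. The path 'strict.xml' is a placeholder.
def _example_configure_verify():
    # Per-call override: treat every spec violation as an exception.
    strict = parse('strict.xml', verify='exception')
    # Global default for subsequent calls that do not pass ``verify``.
    conf.verify = 'warn'
    return strict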
|
d97d5c9d65d26c13401d280ee5756edfe12704eb8f8794aed4febe44835ae09c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
'Conf', 'conf', 'warn_or_raise', 'vo_raise', 'vo_reraise', 'vo_warn',
'warn_unknown_attrs', 'parse_vowarning', 'VOWarning',
'VOTableChangeWarning', 'VOTableSpecWarning',
'UnimplementedWarning', 'IOWarning', 'VOTableSpecError']
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
'Number of times the same type of warning is displayed '
'before being suppressed',
cfgtype='integer')
conf = Conf()
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ('?', '?')
filename = config.get('filename', '?')
return f'{filename}:{pos[0]}:{pos[1]}: {name}: {message}'
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault('_warning_counts', dict()).setdefault(warning_class, 0)
config['_warning_counts'][warning_class] += 1
message_count = config['_warning_counts'][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += \
' (suppressing further warnings of this type...)'
warn(warning, stacklevel=stacklevel+1)
def warn_or_raise(warning_class, exception_class=None, args=(), config=None,
pos=None, stacklevel=1):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get('verify', 'warn')
if config_value == 'exception':
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == 'warn':
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel+1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=''):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += ' ' + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
if config.get('verify', 'warn') != 'ignore':
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel+1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel+1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): " +
r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$")
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result['warning'] = warning = match.group('warning')
if warning is not None:
result['is_warning'] = (warning[0].upper() == 'W')
result['is_exception'] = not result['is_warning']
result['number'] = int(match.group('warning')[1:])
result['doc_url'] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = True
result['number'] = None
result['doc_url'] = None
try:
result['nline'] = int(match.group('nline'))
except ValueError:
result['nline'] = 0
try:
result['nchar'] = int(match.group('nchar'))
except ValueError:
result['nchar'] = 0
result['message'] = match.group('rest')
result['is_something'] = True
else:
result['warning'] = None
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = False
result['is_something'] = False
if not isinstance(line, str):
line = line.decode('utf-8')
result['message'] = line
return result
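# --- Illustrative sketch (editorial addition, not part of the original astropy
# module): decoding one line of a validation report with ``parse_vowarning``.
# The sample line is made up, but follows the ``filename:line:column: Wxx:
# message`` layout produced by ``_format_message`` above.
def _example_parse_vowarning():
    info = parse_vowarning("example.xml:10:0: W21: file declares version 0.9")
    assert info['is_warning'] and info['warning'] == 'W21'
    return info['nline'], info['nchar'], info['message']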
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ''
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args, )
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
    The VOTable 1.1 spec says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ('x', 'y')
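# --- Illustrative sketch (editorial addition, not part of the original astropy
# module): the ID rule quoted in W02 as a standalone check. The pattern is
# copied from the docstring above, not taken from the validation code itself.
_EXAMPLE_XML_ID_PATTERN = re.compile(r'^[A-Za-z_][A-Za-z0-9_\.\-]*$')
def _example_is_valid_xml_id(token):
    return _EXAMPLE_XML_ID_PATTERN.match(token) is not None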
class W03(VOTableChangeWarning):
"""
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present, (since it is not required by
the specification) ``name`` is used instead. However, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ('x', 'y')
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ('x',)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ('x',)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ('x', 'explanation')
class W07(VOTableSpecWarning):
"""
    An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ('x', 'y')
class W08(VOTableSpecWarning):
"""
    To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ('x',)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
    <http://xmlsoft.org/xmllint.html>`__). If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ('x',)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
    ``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes")
default_args = ('x',)
class W13(VOTableSpecWarning):
"""
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ('x', 'y')
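# --- Illustrative sketch (editorial addition, not part of the original astropy
# module): supplying an extra mapping of the kind described in W13 when parsing
# a file that uses non-standard datatype names. The path 'legacy.xml' is a
# placeholder; the mapping mirrors the example in the `parse` docstring.
def _example_datatype_mapping():
    from astropy.io.votable import parse   # imported lazily to avoid a circular import
    return parse('legacy.xml', datatype_mapping={'unsignedInt': 'long'})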
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
    an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``,
    ``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
    attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ('x',)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ('x',)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = 'TABLE specified nrows={}, but table contains {} rows'
default_args = ('x', 'y')
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
'The fields defined in the VOTable do not match those in the ' +
'embedded FITS file')
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = 'No version number specified in file. Assuming {}'
default_args = ('1.1',)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
'astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,'
' and 1.4, but this file is {}')
default_args = ('x',)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = 'The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring'
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possible out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ('x',)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
    newer version of ``astropy.io.votable``. This may cause problems or limited
    functionality when performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = "The VO catalog database is for a later version of astropy.io.votable"
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ('service', '...')
class W26(VOTableSpecWarning):
"""
    The given element was not supported inside of the given element
    until the specified VOTable version; however, the version declared
    in the file is for an earlier version. Such elements may not
    be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ('child', 'parent', 'X.X')
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
    <http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
specified VOTable version, however the version declared in the file is
for an earlier version. These attributes may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ('attribute', 'element', 'X.X')
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
    when the only form supported by the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ('v1.0',)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ('x',)
class W31(VOTableSpecWarning):
"""
    Since NaNs cannot be represented in integer fields directly, a null
    value must be specified in the FIELD descriptor to support reading
    NaNs from the tabledata.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ('x', 'y')
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ('x',)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ('x',)
class W37(UnimplementedWarning):
"""
The 3 datatypes defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ('x',)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ('x',)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected " +
"'{}', got '{}'")
default_args = ('x', 'y')
class W42(VOTableSpecWarning):
"""
The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""
Referenced elements should be defined before referees. From the
VOTable 1.2 spec:
In VOTable 1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ('element', 'x',)
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ('element',)
class W45(VOWarning, ValueError):
"""
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ('x',)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ('char or unicode', 'x')
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ('attribute', 'element')
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
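For example (a sketch; ``file.votable`` is a hypothetical path)::
    from astropy.io.votable import parse
    votable = parse('file.votable', unit_format='fits')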
"""
message_template = "Invalid unit string '{}'"
default_args = ('x',)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ('x', 'n-bit')
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = ("The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'")
default_args = ('1.2',)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = ("VOTABLE element must contain at least one RESOURCE element.")
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'")
default_args = ('1.3',)
class W55(VOTableSpecWarning):
"""
This warning is issued when non-ASCII characters are detected
while reading a TABLEDATA value for a FIELD with
``datatype="char"``.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII '
'value ({})')
default_args = ('', '')
class E01(VOWarning, ValueError):
"""
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters. Given the logic above, it is therefore possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ('x', 'char/unicode', 'y')
class E02(VOWarning, ValueError):
"""
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. " +
"Expected multiple of {}, got {}")
default_args = ('x', 'y')
class E03(VOWarning, ValueError):
"""
Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ('x',)
class E04(VOWarning, ValueError):
"""
A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ('x',)
class E05(VOWarning, ValueError):
r"""
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ('x',)
class E06(VOWarning, ValueError):
"""
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
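For example (a sketch; ``file.votable`` is a hypothetical path)::
    from astropy.io.votable import parse
    votable = parse('file.votable',
                    datatype_mapping={'unsignedLong': 'long'})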
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ('x', 'y')
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ('x',)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ('x',)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ('FIELD',)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10⁻⁵).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ('x',)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ('x',)
class E13(VOWarning, ValueError):
r"""
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
about 2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ('x',)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""
All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""
The ``system`` attribute on the ``COOSYS`` element must be one of the
following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ('x',)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ('x',)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ('x',)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ('x', 'y')
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ('x',)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which '
'has datatype="char"')
default_args = ('', '')
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode('utf-8')
out.write(msg)
out.write('\n')
out.write('~' * len(msg))
out.write('\n\n')
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode('utf-8')
out.write(dedent(doc))
out.write('\n\n')
return out.getvalue()
warnings = generate_set('W')
exceptions = generate_set('E')
return {'warnings': warnings,
'exceptions': exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes('W')])
__all__.extend([x[0] for x in _get_warning_and_exception_classes('E')])
|
ad09e7fa46757019e532a792a3e77d55d153b9962d53a565b76e3edb395b30f8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains routines to verify the correctness of UCD strings.
"""
# STDLIB
import re
# LOCAL
from astropy.utils import data
__all__ = ['parse_ucd', 'check_ucd']
class UCDWords:
"""
Manages a list of acceptable UCD words.
Works by reading in a data file exactly as provided by IVOA. This
file resides in data/ucd1p-words.txt.
"""
def __init__(self):
self._primary = set()
self._secondary = set()
self._descriptions = {}
self._capitalization = {}
with data.get_pkg_data_fileobj(
"data/ucd1p-words.txt", encoding='ascii') as fd:
for line in fd.readlines():
type, name, descr = [
x.strip() for x in line.split('|')]
name_lower = name.lower()
if type in 'QPEVC':
self._primary.add(name_lower)
if type in 'QSEVC':
self._secondary.add(name_lower)
self._descriptions[name_lower] = descr
self._capitalization[name_lower] = name
def is_primary(self, name):
"""
Returns True if *name* is a valid primary name.
"""
return name.lower() in self._primary
def is_secondary(self, name):
"""
Returns True if *name* is a valid secondary name.
"""
return name.lower() in self._secondary
def get_description(self, name):
"""
Returns the official English description of the given UCD
*name*.
"""
return self._descriptions[name.lower()]
def normalize_capitalization(self, name):
"""
Returns the standard capitalization form of the given name.
"""
return self._capitalization[name.lower()]
_ucd_singleton = None
def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
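Examples
--------
A minimal sketch (output shown for illustration; the normalized
capitalization comes from the bundled UCD1+ word list)::
    parse_ucd('pos.eq.ra;meta.main', check_controlled_vocabulary=True)
    # -> [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]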
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r'[^A-Za-z0-9_.:;\-]', ucd)
else:
m = re.search(r'[^A-Za-z0-9_.;\-]', ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r'[A-Za-z0-9][A-Za-z0-9\-_]*'
word_re = fr'{word_component_re}(\.{word_component_re})*'
parts = ucd.split(';')
words = []
for i, word in enumerate(parts):
colon_count = word.count(':')
if colon_count == 1:
ns, word = word.split(':', 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = 'ivoa'
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == 'ivoa' and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word")
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word")
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words
def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
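Examples
--------
A minimal sketch::
    check_ucd('pos.eq.ra;meta.main', check_controlled_vocabulary=True)  # -> True
    check_ucd('not a valid ucd')  # -> False (spaces are invalid characters)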
"""
if ucd is None:
return True
try:
parse_ucd(ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon)
except ValueError:
return False
return True
|
8fae8c1d57d7f157e49e07dc2c4298abb8d2ebcaef46322216a936aea5a350c1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from setuptools import Extension
from os.path import join
def get_extensions(build_type='release'):
VO_DIR = 'astropy/io/votable/src'
return [Extension(
"astropy.io.votable.tablewriter",
[join(VO_DIR, "tablewriter.c")],
include_dirs=[VO_DIR])]
|
4d35e041e938a6c595302fd62694bc19482a6b6b050b24935e1677fabbc1584f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various utilities and cookbook-like things.
"""
# STDLIB
import codecs
import contextlib
import io
import re
import gzip
from packaging.version import Version
__all__ = [
'convert_to_writable_filelike',
'stc_reference_frames',
'coerce_range_list_param',
]
@contextlib.contextmanager
def convert_to_writable_filelike(fd, compressed=False):
"""
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : str or file-like
May be:
- a file path string, in which case it is opened, and the file
object is returned.
- an object with a ``write`` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : writable file-like
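Examples
--------
A minimal sketch (``out.xml.gz`` is a hypothetical path)::
    with convert_to_writable_filelike('out.xml.gz', compressed=True) as fd:
        fd.write('<VOTABLE/>')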
"""
if isinstance(fd, str):
if fd.endswith('.gz') or compressed:
with gzip.GzipFile(fd, 'wb') as real_fd:
encoded_fd = io.TextIOWrapper(real_fd, encoding='utf8')
yield encoded_fd
encoded_fd.flush()
real_fd.flush()
return
else:
with open(fd, 'wt', encoding='utf8') as real_fd:
yield real_fd
return
elif hasattr(fd, 'write'):
assert callable(fd.write)
if compressed:
fd = gzip.GzipFile(fileobj=fd, mode='wb')
# If we can't write Unicode strings, use a codecs.StreamWriter
# object
needs_wrapper = False
try:
fd.write('')
except TypeError:
needs_wrapper = True
if not hasattr(fd, 'encoding') or fd.encoding is None:
needs_wrapper = True
if needs_wrapper:
yield codecs.getwriter('utf-8')(fd)
fd.flush()
else:
yield fd
fd.flush()
return
else:
raise TypeError("Can not be coerced to writable file-like object")
# <http://www.ivoa.net/documents/REC/DM/STC-20071030.html>
stc_reference_frames = set([
'FK4', 'FK5', 'ECLIPTIC', 'ICRS', 'GALACTIC', 'GALACTIC_I', 'GALACTIC_II',
'SUPER_GALACTIC', 'AZ_EL', 'BODY', 'GEO_C', 'GEO_D', 'MAG', 'GSE', 'GSM',
'SM', 'HGC', 'HGS', 'HEEQ', 'HRTN', 'HPC', 'HPR', 'HCC', 'HGI',
'MERCURY_C', 'VENUS_C', 'LUNA_C', 'MARS_C', 'JUPITER_C_III',
'SATURN_C_III', 'URANUS_C_III', 'NEPTUNE_C_III', 'PLUTO_C', 'MERCURY_G',
'VENUS_G', 'LUNA_G', 'MARS_G', 'JUPITER_G_III', 'SATURN_G_III',
'URANUS_G_III', 'NEPTUNE_G_III', 'PLUTO_G', 'UNKNOWNFrame'])
def coerce_range_list_param(p, frames=None, numeric=True):
"""
Coerces and/or verifies the object *p* into a valid range-list-format parameter.
As defined in `Section 8.7.2 of Simple
Spectral Access Protocol
<http://www.ivoa.net/documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
- the last item may be a string indicating the frame of
reference
frames : sequence of str, optional
A sequence of acceptable frame of reference keywords. If not
provided, the default set in ``set_reference_frames`` will be
used.
numeric : bool, optional
TODO
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements
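Examples
--------
A minimal sketch with illustrative values::
    coerce_range_list_param((5000.0, (5500.0, 6000.0), 'FK5'),
                            frames=stc_reference_frames)
    # -> ('5000.0,5500.0/6000.0;FK5', 3)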
"""
def str_or_none(x):
if x is None:
return ''
if numeric:
x = float(x)
return str(x)
def numeric_or_range(x):
if isinstance(x, tuple) and len(x) == 2:
return f'{str_or_none(x[0])}/{str_or_none(x[1])}'
else:
return str_or_none(x)
def is_frame_of_reference(x):
return isinstance(x, str)
if p is None:
return None, 0
elif isinstance(p, (tuple, list)):
has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1])
if has_frame_of_reference:
points = p[:-1]
else:
points = p[:]
out = ','.join([numeric_or_range(x) for x in points])
length = len(points)
if has_frame_of_reference:
if frames is not None and p[-1] not in frames:
raise ValueError(
f"'{p[-1]}' is not a valid frame of reference")
out += ';' + p[-1]
length += 1
return out, length
elif isinstance(p, str):
number = r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?'
if not numeric:
number = r'(' + number + ')|([A-Z_]+)'
match = re.match(
'^' + number + r'([,/]' + number +
r')+(;(?P<frame>[<A-Za-z_0-9]+))?$',
p)
if match is None:
raise ValueError(f"'{p}' is not a valid range list")
frame = match.groupdict()['frame']
if frames is not None and frame is not None and frame not in frames:
raise ValueError(
f"'{frame}' is not a valid frame of reference")
return p, p.count(',') + p.count(';') + 1
try:
float(p)
return str(p), 1
except TypeError:
raise ValueError(f"'{p}' is not a valid range list")
def version_compare(a, b):
"""
Compare two VOTable version identifiers.
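For example, ``version_compare('v1.1', '1.3')`` returns ``-1`` and
``version_compare('1.2', '1.2')`` returns ``0``.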
"""
def version_to_tuple(v):
if v[0].lower() == 'v':
v = v[1:]
return Version(v)
av = version_to_tuple(a)
bv = version_to_tuple(b)
# Can't use cmp because it was removed from Python 3.x
return (av > bv) - (av < bv)
|
76bcae3b908fe348b782656e238cfc9b7960962eb42d09dd423573557376414f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various XML-related utilities
"""
# ASTROPY
from astropy.logger import log
from astropy.utils import data
from astropy.utils.xml import check as xml_check
from astropy.utils.xml import validate
# LOCAL
from .exceptions import (warn_or_raise, vo_warn, W02, W03, W04, W05)
__all__ = [
'check_id', 'fix_id', 'check_token', 'check_mime_content_type',
'check_anyuri', 'validate_schema'
]
def check_id(ID, name='ID', config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ID*
is not a valid XML ID_.
*name* is the name of the attribute being checked (used only for
error messages).
"""
if (ID is not None and not xml_check.check_id(ID)):
warn_or_raise(W02, W02, (name, ID), config, pos)
return False
return True
def fix_id(ID, config=None, pos=None):
"""
Given an arbitrary string, create one that can be used as an xml id.
This is rather simplistic at the moment, since it just replaces
non-valid characters with underscores.
"""
if ID is None:
return None
corrected = xml_check.fix_id(ID)
if corrected != ID:
vo_warn(W03, (ID, corrected), config, pos)
return corrected
_token_regex = r"(?![\r\n\t ])[^\r\n\t]*(?![\r\n\t ])"
def check_token(token, attr_name, config=None, pos=None):
"""
Returns `False` if *token* is not a valid XML token, as defined
by XML Schema Part 2.
"""
if (token is not None and not xml_check.check_token(token)):
return False
return True
def check_mime_content_type(content_type, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*content_type* is not a valid MIME content type.
As defined by RFC 2045 (syntactically, at least).
"""
if (content_type is not None and
not xml_check.check_mime_content_type(content_type)):
warn_or_raise(W04, W04, content_type, config, pos)
return False
return True
def check_anyuri(uri, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*uri* is not a valid URI.
As defined in RFC 2396.
"""
if (uri is not None and not xml_check.check_anyuri(uri)):
warn_or_raise(W05, W05, uri, config, pos)
return False
return True
def validate_schema(filename, version='1.1'):
"""
Validates the given file against the appropriate VOTable schema.
Parameters
----------
filename : str
The path to the XML file to validate
version : str, optional
The VOTABLE version to check, which must be a string \"1.0\",
\"1.1\", \"1.2\" or \"1.3\". If it is not one of these,
version \"1.1\" is assumed.
For version \"1.0\", it is checked against a DTD, since that
version did not have an XML Schema.
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
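Examples
--------
A minimal sketch (``example.vot`` is a hypothetical path; the check
shells out to the ``xmllint`` tool, which must be available)::
    returncode, stdout, stderr = validate_schema('example.vot', '1.3')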
"""
if version not in ('1.0', '1.1', '1.2', '1.3'):
log.info(f'{filename} has version {version}, using schema 1.1')
version = '1.1'
if version in ('1.1', '1.2', '1.3'):
schema_path = data.get_pkg_data_filename(
f'data/VOTable.v{version}.xsd')
else:
schema_path = data.get_pkg_data_filename(
'data/VOTable.dtd')
return validate.validate_schema(filename, schema_path)
|
ee064974f910cd9be53fc8550ede669b89e94a8dc66cafe10bb0cf0a5737aa0c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Script support for validating a VO file.
"""
def main(args=None):
from . import table
import argparse
parser = argparse.ArgumentParser(
description=("Check a VOTable file for compliance to the "
"VOTable specification"))
parser.add_argument(
'filename', nargs=1, help='Path to VOTable file to check')
args = parser.parse_args(args)
table.validate(args.filename[0])
|
391f7442fd8d316ddf767e0a720f5ce4d929a4faef171b2af2f08f83e8305b47 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains implementations of command-line scripts that are
included with Astropy.
The actual scripts that are installed in bin/ are simple wrappers for these
modules that will run in any Python version.
"""
|
f89fc41b35a54f1d6ceb4bd2cdee5c6644a5ac96686a811edbb2daa98bb31b11 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsheader`` is a command line script based on astropy.io.fits for printing
the header(s) of one or more FITS file(s) to the standard output in a human-
readable format.
Example uses of fitsheader:
1. Print the header of all the HDUs of a .fits file::
$ fitsheader filename.fits
2. Print the header of the third and fifth HDU extension::
$ fitsheader --extension 3 --extension 5 filename.fits
3. Print the header of a named extension, e.g. select the HDU containing
keywords EXTNAME='SCI' and EXTVER='2'::
$ fitsheader --extension "SCI,2" filename.fits
4. Print only specific keywords::
$ fitsheader --keyword BITPIX --keyword NAXIS filename.fits
5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::
$ fitsheader --keyword NAXIS* filename.fits
6. Dump the header keywords of all the files in the current directory into a
machine-readable csv file::
$ fitsheader --table ascii.csv *.fits > keywords.csv
7. Specify hierarchical keywords with the dotted or spaced notation::
$ fitsheader --keyword ESO.INS.ID filename.fits
$ fitsheader --keyword "ESO INS ID" filename.fits
8. Compare the headers of different fits files, following ESO's ``fitsort``
format::
$ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits
9. Same as above, sorting the output along a specified keyword::
$ fitsheader -f DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits
Note that compressed images (HDUs of type
:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real
BINTABLE header to describe the compressed data, and a fake IMAGE header
representing the image that was compressed. Astropy returns the latter by
default. You must supply the ``--compressed`` option if you require the real
header that describes the compression.
With Astropy installed, please run ``fitsheader --help`` to see the full usage
documentation.
"""
import sys
import argparse
import numpy as np
from astropy.io import fits
from astropy import log, __version__
DESCRIPTION = """
Print the header(s) of a FITS file. Optional arguments allow the desired
extension(s), keyword(s), and output format to be specified.
Note that in the case of a compressed image, the decompressed header is
shown by default.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsheader
for further documentation.
""".strip()
class ExtensionNotFoundException(Exception):
"""Raised if an HDU extension requested by the user does not exist."""
pass
class HeaderFormatter:
"""Class to format the header(s) of a FITS file for display by the
`fitsheader` tool; essentially a wrapper around a `HDUList` object.
Example usage:
fmt = HeaderFormatter('/path/to/file.fits')
print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))
Parameters
----------
filename : str
Path to a single FITS file.
verbose : bool
Verbose flag, to show more information about missing extensions,
keywords, etc.
Raises
------
OSError
If `filename` does not exist or cannot be read.
"""
def __init__(self, filename, verbose=True):
self.filename = filename
self.verbose = verbose
self._hdulist = fits.open(filename)
def parse(self, extensions=None, keywords=None, compressed=False):
"""Returns the FITS file header(s) in a readable format.
Parameters
----------
extensions : list of int or str, optional
Format only specific HDU(s), identified by number or name.
The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER"
keywords.
keywords : list of str, optional
Keywords for which the value(s) should be returned.
If not specified, then the entire header is returned.
compressed : bool, optional
If True, shows the header describing the compression, rather than
the header obtained after decompression. (Affects FITS files
containing `CompImageHDU` extensions only.)
Returns
-------
formatted_header : str or astropy.table.Table
Traditional 80-char wide format in the case of `HeaderFormatter`;
an Astropy Table object in the case of `TableHeaderFormatter`.
"""
# `hdukeys` will hold the keys of the HDUList items to display
if extensions is None:
hdukeys = range(len(self._hdulist)) # Display all by default
else:
hdukeys = []
for ext in extensions:
try:
# HDU may be specified by number
hdukeys.append(int(ext))
except ValueError:
# The user can specify "EXTNAME" or "EXTNAME,EXTVER"
parts = ext.split(',')
if len(parts) > 1:
extname = ','.join(parts[0:-1])
extver = int(parts[-1])
hdukeys.append((extname, extver))
else:
hdukeys.append(ext)
# Having established which HDUs the user wants, we now format these:
return self._parse_internal(hdukeys, keywords, compressed)
def _parse_internal(self, hdukeys, keywords, compressed):
"""The meat of the formatting; in a separate method to allow overriding.
"""
result = []
for idx, hdu in enumerate(hdukeys):
try:
cards = self._get_cards(hdu, keywords, compressed)
except ExtensionNotFoundException:
continue
if idx > 0: # Separate HDUs by a blank line
result.append('\n')
result.append(f'# HDU {hdu} in {self.filename}:\n')
for c in cards:
result.append(f'{c}\n')
return ''.join(result)
def _get_cards(self, hdukey, keywords, compressed):
"""Returns a list of `astropy.io.fits.card.Card` objects.
This function will return the desired header cards, taking into
account the user's preference to see the compressed or uncompressed
version.
Parameters
----------
hdukey : int or str
Key of a single HDU in the HDUList.
keywords : list of str, optional
Keywords for which the cards should be returned.
compressed : bool, optional
If True, shows the header describing the compression.
Raises
------
ExtensionNotFoundException
If the hdukey does not correspond to an extension.
"""
# First we obtain the desired header
try:
if compressed:
# In the case of a compressed image, return the header before
# decompression (not the default behavior)
header = self._hdulist[hdukey]._header
else:
header = self._hdulist[hdukey].header
except (IndexError, KeyError):
message = f'{self.filename}: Extension {hdukey} not found.'
if self.verbose:
log.warning(message)
raise ExtensionNotFoundException(message)
if not keywords: # return all cards
cards = header.cards
else: # specific keywords are requested
cards = []
for kw in keywords:
try:
crd = header.cards[kw]
if isinstance(crd, fits.card.Card): # Single card
cards.append(crd)
else: # Allow for wildcard access
cards.extend(crd)
except KeyError: # Keyword does not exist
if self.verbose:
log.warning('{filename} (HDU {hdukey}): '
'Keyword {kw} not found.'.format(
filename=self.filename,
hdukey=hdukey,
kw=kw))
return cards
def close(self):
self._hdulist.close()
class TableHeaderFormatter(HeaderFormatter):
"""Class to convert the header(s) of a FITS file into a Table object.
The table returned by the `parse` method will contain four columns:
filename, hdu, keyword, and value.
Subclassed from HeaderFormatter, which contains the meat of the formatting.
"""
def _parse_internal(self, hdukeys, keywords, compressed):
"""Method called by the parse method in the parent class."""
tablerows = []
for hdu in hdukeys:
try:
for card in self._get_cards(hdu, keywords, compressed):
tablerows.append({'filename': self.filename,
'hdu': hdu,
'keyword': card.keyword,
'value': str(card.value)})
except ExtensionNotFoundException:
pass
if tablerows:
from astropy import table
return table.Table(tablerows)
return None
def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keywords:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(formatter.parse(args.extensions,
args.keywords,
args.compressed), end='')
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close()
def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions,
args.keywords,
args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table)
def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions,
args.keywords,
args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl['keyword'] != '')]
else:
tbl = table.Table([[filename]], names=('filename',))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
hdus = resulting_table['hdu']
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column(
[f"{row['hdu']}:{row['keyword']}" for row in tab])
tab.add_column(new_column, name='hdu+keyword')
keyword_column_name = 'hdu+keyword'
else:
keyword_column_name = 'keyword'
# Check how many hdus we are processing
final_tables = []
for tab in tables:
final_table = [table.Column([tab['filename'][0]], name='filename')]
if 'value' in tab.colnames:
for row in tab:
if row['keyword'] in ('COMMENT', 'HISTORY'):
continue
final_table.append(table.Column([row['value']],
name=row[keyword_column_name]))
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.fitsort is not True: # then it must be a keyword, therefore sort
final_table.sort(args.fitsort)
# Reorganise to keyword by columns
final_table.pprint(max_lines=-1, max_width=-1)
class KeywordAppendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
keyword = values.replace('.', ' ')
if namespace.keywords is None:
namespace.keywords = []
if keyword not in namespace.keywords:
namespace.keywords.append(keyword)
def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument('-e', '--extension', metavar='HDU',
action='append', dest='extensions',
help='specify the extension by name or number; '
'this argument can be repeated '
'to select multiple extensions')
parser.add_argument('-k', '--keyword', metavar='KEYWORD',
action=KeywordAppendAction, dest='keywords',
help='specify a keyword; this argument can be '
'repeated to select multiple keywords; '
'also supports wildcards')
parser.add_argument('-t', '--table',
nargs='?', default=False, metavar='FORMAT',
help='print the header(s) in machine-readable table '
'format; the default format is '
'"ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)')
parser.add_argument('-f', '--fitsort', action='store_true',
help='print the headers as a table with each unique '
'keyword in a given column (fitsort format); '
'if a SORT_KEYWORD is specified, the result will be '
'sorted along that keyword')
parser.add_argument('-c', '--compressed', action='store_true',
help='for compressed image data, '
'show the true header which describes '
'the compression rather than the data')
parser.add_argument('filename', nargs='+',
help='path to one or more files; '
'wildcards are supported')
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = 'ascii.fixed_width'
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass
|
a2fc679a44f101c8bbc2db0dd27befdac5435fc7aec15ad23d996ca3ba6e28c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitscheck`` is a command line script based on astropy.io.fits for verifying
and updating the CHECKSUM and DATASUM keywords of .fits files. ``fitscheck``
can also detect and often fix other FITS standards violations. ``fitscheck``
facilitates re-writing the non-standard checksums originally generated by
astropy.io.fits with standard checksums which will interoperate with CFITSIO.
``fitscheck`` will refuse to write new checksums if the checksum keywords are
missing or their values are bad. Use ``--force`` to write new checksums
regardless of whether or not they currently exist or pass. Use
``--ignore-missing`` to tolerate missing checksum keywords without comment.
Example uses of fitscheck:
1. Add checksums::
$ fitscheck --write *.fits
2. Write new checksums, even if existing checksums are bad or missing::
$ fitscheck --write --force *.fits
3. Verify standard checksums and FITS compliance without changing the files::
$ fitscheck --compliance *.fits
4. Only check and fix compliance problems, ignoring checksums::
$ fitscheck --checksum none --compliance --write *.fits
5. Verify standard interoperable checksums::
$ fitscheck *.fits
6. Delete checksum keywords::
$ fitscheck --checksum remove --write *.fits
"""
import sys
import logging
import argparse
import warnings
from astropy.io import fits
from astropy import __version__
log = logging.getLogger('fitscheck')
DESCRIPTION = """
e.g. fitscheck example.fits
Verifies and optionally re-writes the CHECKSUM and DATASUM keywords
for a .fits file.
Optionally detects and fixes FITS standard compliance problems.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitscheck
for further documentation.
""".strip()
def handle_options(args):
if not len(args):
args = ['-h']
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument(
'fits_files', metavar='file', nargs='+',
help='.fits files to process.')
parser.add_argument(
'-k', '--checksum', dest='checksum_kind',
choices=['standard', 'remove', 'none'],
help='Choose FITS checksum mode or none. Defaults to standard.',
default='standard')
parser.add_argument(
'-w', '--write', dest='write_file',
help='Write out file checksums and/or FITS compliance fixes.',
default=False, action='store_true')
parser.add_argument(
'-f', '--force', dest='force',
help='Do file update even if original checksum was bad.',
default=False, action='store_true')
parser.add_argument(
'-c', '--compliance', dest='compliance',
help='Do FITS compliance checking; fix if possible.',
default=False, action='store_true')
parser.add_argument(
'-i', '--ignore-missing', dest='ignore_missing',
help='Ignore missing checksums.',
default=False, action='store_true')
parser.add_argument(
'-v', '--verbose', dest='verbose', help='Generate extra output.',
default=False, action='store_true')
global OPTIONS
OPTIONS = parser.parse_args(args)
if OPTIONS.checksum_kind == 'none':
OPTIONS.checksum_kind = False
elif OPTIONS.checksum_kind == 'standard':
OPTIONS.checksum_kind = True
elif OPTIONS.checksum_kind == 'remove':
OPTIONS.write_file = True
OPTIONS.force = True
return OPTIONS.fits_files
def setup_logging():
log.handlers.clear()
if OPTIONS.verbose:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(handler)
def verify_checksums(filename):
"""
Prints a message if any HDU in `filename` has a bad checksum or datasum.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always')
with fits.open(filename, checksum=OPTIONS.checksum_kind) as hdulist:
for i, hdu in enumerate(hdulist):
# looping on HDUs is needed to read them and verify the
# checksums
if not OPTIONS.ignore_missing:
if not hdu._checksum:
log.warning('MISSING {!r} .. Checksum not found '
'in HDU #{}'.format(filename, i))
return 1
if not hdu._datasum:
log.warning('MISSING {!r} .. Datasum not found '
'in HDU #{}'.format(filename, i))
return 1
for w in wlist:
if str(w.message).startswith(('Checksum verification failed',
'Datasum verification failed')):
log.warning('BAD %r %s', filename, str(w.message))
return 1
log.info(f'OK {filename!r}')
return 0
def verify_compliance(filename):
"""Check for FITS standard compliance."""
with fits.open(filename) as hdulist:
try:
hdulist.verify('exception')
except fits.VerifyError as exc:
log.warning('NONCOMPLIANT %r .. %s',
filename, str(exc).replace('\n', ' '))
return 1
return 0
def update(filename):
"""
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
Also fixes standards violations if possible and requested.
"""
output_verify = 'silentfix' if OPTIONS.compliance else 'ignore'
# For unit tests we reset temporarily the warning filters. Indeed, before
# updating the checksums, fits.open will verify the existing checksums and
# raise warnings, which are later caught and converted to log.warning...
# which is an issue when testing, using the "error" action to convert
# warnings to exceptions.
with warnings.catch_warnings():
warnings.resetwarnings()
with fits.open(filename, do_not_scale_image_data=True,
checksum=OPTIONS.checksum_kind, mode='update') as hdulist:
hdulist.flush(output_verify=output_verify)
def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception as e:
log.error(f'EXCEPTION {filename!r} .. {e}')
return 1
def main(args=None):
"""
Processes command line parameters into options and files, then checks
or update FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(args or sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warning(f'{errors} errors')
return int(bool(errors))
|
373f3abd34ed75e1319d06c3537bcb1c535ac96428d68875832e11708c01cc79 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS files(s) to the
standard output.
Example usage of ``fitsinfo``:
1. Print a summary of the HDUs in a FITS file::
$ fitsinfo filename.fits
Filename: filename.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 138 ()
1 SCI ImageHDU 61 (800, 800) int16
2 SCI ImageHDU 61 (800, 800) int16
3 SCI ImageHDU 61 (800, 800) int16
4 SCI ImageHDU 61 (800, 800) int16
2. Print a summary of HDUs of all the FITS files in the current directory::
$ fitsinfo *.fits
"""
import argparse
import astropy.io.fits as fits
from astropy import log, __version__
DESCRIPTION = """
Print a summary of the HDUs in a FITS file(s).
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsinfo
for further documentation.
""".strip()
def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
"""
try:
fits.info(filename)
except OSError as e:
log.error(str(e))
return
def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files. '
'Wildcards are supported.')
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename)
|
1f472f3e9488aaedef13bccbb0317123a60726dea4d2ecf814489f1ff64e85ce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import argparse
import glob
import logging
import os
import sys
from astropy.io import fits
from astropy.io.fits.util import fill
from astropy import __version__
log = logging.getLogger('fitsdiff')
DESCRIPTION = """
Compare two FITS image files and report the differences in header keywords and
data.
fitsdiff [options] filename1 filename2
where filename1 filename2 are the two files to be compared. They may also be
wild cards, in such cases, they must be enclosed by double or single quotes, or
they may be directory names. If both are directory names, all files in each of
the directories will be included; if only one is a directory name, then the
directory name will be prefixed to the file name(s) specified by the other
argument. For example::
fitsdiff "*.fits" "/machine/data1"
will compare all FITS files in the current directory to the corresponding files
in the directory /machine/data1.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#fitsdiff
for further documentation.
""".strip()
EPILOG = fill("""
If the two files are identical within the specified conditions, it will report
"No difference is found." If the value(s) of -c and -k takes the form
'@filename', list is in the text file 'filename', and each line in that text
file contains one keyword.
Example
-------
fitsdiff -k filename,filtnam1 -n 5 -r 1.e-6 test1.fits test2
This command will compare files test1.fits and test2.fits, report maximum of 5
different pixels values per extension, only report data values larger than
1.e-6 relative to each other, and will neglect the different values of keywords
FILENAME and FILTNAM1 (or their very existence).
fitsdiff command-line arguments can also be set using the environment variable
FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present,
each argument present will override the corresponding argument on the
command-line unless the --exact option is specified. The FITSDIFF_SETTINGS
environment variable exists to make it easier to change the
behavior of fitsdiff on a global level, such as in a set of regression tests.
""".strip(), width=80)
class StoreListAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super().__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, [])
# Accept either a comma-separated list or a filename (starting with @)
# containing a value on each line
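# For example (hypothetical values):
#   --ignore-keywords FILENAME,FILTNAM1
#   --ignore-keywords @keywords.txt   (one keyword per line in the file)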
if values and values[0] == '@':
value = values[1:]
if not os.path.exists(value):
log.warning(f'{self.dest} argument {value} does not exist')
return
try:
values = [v.strip() for v in open(value, 'r').readlines()]
setattr(namespace, self.dest, values)
except OSError as exc:
log.warning('reading {} for {} failed: {}; ignoring this '
'argument'.format(value, self.dest, exc))
del exc
else:
setattr(namespace, self.dest,
[v.strip() for v in values.split(',')])
def handle_options(argv=None):
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument(
'fits_files', metavar='file', nargs='+',
help='.fits files to process.')
parser.add_argument(
'-q', '--quiet', action='store_true',
help='Produce no output and just return a status code.')
parser.add_argument(
'-n', '--num-diffs', type=int, default=10, dest='numdiffs',
metavar='INTEGER',
help='Max number of data differences (image pixel or table element) '
'to report per extension (default %(default)s).')
parser.add_argument(
'-r', '--rtol', '--relative-tolerance', type=float, default=None,
dest='rtol', metavar='NUMBER',
help='The relative tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %(default)s).')
parser.add_argument(
'-a', '--atol', '--absolute-tolerance', type=float, default=None,
dest='atol', metavar='NUMBER',
help='The absolute tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %(default)s).')
parser.add_argument(
'-b', '--no-ignore-blanks', action='store_false',
dest='ignore_blanks', default=True,
help="Don't ignore trailing blanks (whitespace) in string values. "
"Otherwise trailing blanks both in header keywords/values and in "
"table column values) are not treated as significant i.e., "
"without this option 'ABCDEF ' and 'ABCDEF' are considered "
"equivalent. ")
parser.add_argument(
'--no-ignore-blank-cards', action='store_false',
dest='ignore_blank_cards', default=True,
help="Don't ignore entirely blank cards in headers. Normally fitsdiff "
"does not consider blank cards when comparing headers, but this "
"will ensure that even blank cards match up. ")
parser.add_argument(
'--exact', action='store_true',
dest='exact_comparisons', default=False,
help="Report ALL differences, "
"overriding command-line options and FITSDIFF_SETTINGS. ")
parser.add_argument(
'-o', '--output-file', metavar='FILE',
help='Output results to this file; otherwise results are printed to '
'stdout.')
parser.add_argument(
'-u', '--ignore-hdus', action=StoreListAction,
default=[], dest='ignore_hdus',
metavar='HDU_NAMES',
help='Comma-separated list of HDU names not to be compared. HDU '
'names may contain wildcard patterns.')
group = parser.add_argument_group('Header Comparison Options')
group.add_argument(
'-k', '--ignore-keywords', action=StoreListAction,
default=[], dest='ignore_keywords',
metavar='KEYWORDS',
help='Comma-separated list of keywords not to be compared. Keywords '
'may contain wildcard patterns. To exclude all keywords, use '
'"*"; make sure to have double or single quotes around the '
'asterisk on the command-line.')
group.add_argument(
'-c', '--ignore-comments', action=StoreListAction,
default=[], dest='ignore_comments',
metavar='COMMENTS',
help='Comma-separated list of keywords whose comments will not be '
'compared. Wildcards may be used as with --ignore-keywords.')
group = parser.add_argument_group('Table Comparison Options')
group.add_argument(
'-f', '--ignore-fields', action=StoreListAction,
default=[], dest='ignore_fields',
metavar='COLUMNS',
help='Comma-separated list of fields (i.e. columns) not to be '
'compared. All columns may be excluded using "*" as with '
'--ignore-keywords.')
options = parser.parse_args(argv)
# Determine which filenames to compare
if len(options.fits_files) != 2:
parser.error('\nfitsdiff requires two arguments; '
'see `fitsdiff --help` for more details.')
return options
def setup_logging(outfile=None):
log.setLevel(logging.INFO)
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
error_handler.setLevel(logging.WARNING)
log.addHandler(error_handler)
if outfile is not None:
output_handler = logging.FileHandler(outfile)
else:
output_handler = logging.StreamHandler()
class LevelFilter(logging.Filter):
"""Log only messages matching the specified level."""
def __init__(self, name='', level=logging.NOTSET):
logging.Filter.__init__(self, name)
self.level = level
def filter(self, rec):
return rec.levelno == self.level
# File output logs all messages, but stdout logs only INFO messages
# (since errors are already logged to stderr)
output_handler.addFilter(LevelFilter(level=logging.INFO))
output_handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(output_handler)
def match_files(paths):
if os.path.isfile(paths[0]) and os.path.isfile(paths[1]):
# shortcut if both paths are files
return [paths]
dirnames = [None, None]
filelists = [None, None]
for i, path in enumerate(paths):
if glob.has_magic(path):
files = [os.path.split(f) for f in glob.glob(path)]
if not files:
log.error('Wildcard pattern %r did not match any files.', path)
sys.exit(2)
dirs, files = list(zip(*files))
if len(set(dirs)) > 1:
log.error('Wildcard pattern %r should match only one '
'directory.', path)
sys.exit(2)
dirnames[i] = set(dirs).pop()
filelists[i] = sorted(files)
elif os.path.isdir(path):
dirnames[i] = path
filelists[i] = [f for f in sorted(os.listdir(path))
if os.path.isfile(os.path.join(path, f))]
elif os.path.isfile(path):
dirnames[i] = os.path.dirname(path)
filelists[i] = [os.path.basename(path)]
else:
log.error(
'%r is not an existing file, directory, or wildcard '
'pattern; see `fitsdiff --help` for more usage help.', path)
sys.exit(2)
dirnames[i] = os.path.abspath(dirnames[i])
filematch = set(filelists[0]) & set(filelists[1])
for a, b in [(0, 1), (1, 0)]:
if len(filelists[a]) > len(filematch) and not os.path.isdir(paths[a]):
for extra in sorted(set(filelists[a]) - filematch):
log.warning('%r has no match in %r', extra, dirnames[b])
return [(os.path.join(dirnames[0], f),
os.path.join(dirnames[1], f)) for f in filematch]
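# Pairing sketch for match_files (illustrative only; the paths are made up):
# given paths = ['new/a.fits', 'ref'], where 'ref' is a directory that also
# contains 'a.fits', the function returns
# [('/abs/path/new/a.fits', '/abs/path/ref/a.fits')]; files present on only
# one side are reported via log.warning.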
def main(args=None):
args = args or sys.argv[1:]
if 'FITSDIFF_SETTINGS' in os.environ:
args = os.environ['FITSDIFF_SETTINGS'].split() + args
opts = handle_options(args)
if opts.rtol is None:
opts.rtol = 0.0
if opts.atol is None:
opts.atol = 0.0
if opts.exact_comparisons:
# override the options so that each is the most restrictive
opts.ignore_keywords = []
opts.ignore_comments = []
opts.ignore_fields = []
opts.rtol = 0.0
opts.atol = 0.0
opts.ignore_blanks = False
opts.ignore_blank_cards = False
if not opts.quiet:
setup_logging(opts.output_file)
files = match_files(opts.fits_files)
close_file = False
if opts.quiet:
out_file = None
elif opts.output_file:
out_file = open(opts.output_file, 'w')
close_file = True
else:
out_file = sys.stdout
identical = []
try:
for a, b in files:
# TODO: pass in any additional arguments here too
diff = fits.diff.FITSDiff(
a, b,
ignore_hdus=opts.ignore_hdus,
ignore_keywords=opts.ignore_keywords,
ignore_comments=opts.ignore_comments,
ignore_fields=opts.ignore_fields,
numdiffs=opts.numdiffs,
rtol=opts.rtol,
atol=opts.atol,
ignore_blanks=opts.ignore_blanks,
ignore_blank_cards=opts.ignore_blank_cards)
diff.report(fileobj=out_file)
identical.append(diff.identical)
return int(not all(identical))
finally:
if close_file:
out_file.close()
# Close the file if used for the logging output, and remove handlers to
# avoid having them multiple times for unit tests.
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
log.removeHandler(handler)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import numpy as np
from .base import DTYPE2BITPIX, DELAYED
from .image import PrimaryHDU
from .table import _TableLikeHDU
from astropy.io.fits.column import Column, ColDefs, FITS2NUMPY
from astropy.io.fits.fitsrec import FITS_rec, FITS_record
from astropy.io.fits.util import _is_int, _is_pseudo_integer, _pseudo_zero
from astropy.utils import lazyproperty
class Group(FITS_record):
"""
One group of the random group data.
"""
def __init__(self, input, row=0, start=None, end=None, step=None,
base=None):
super().__init__(input, row, start, end, step, base)
@property
def parnames(self):
return self.array.parnames
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self.array._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter value.
"""
if _is_int(parname):
result = self.array[self.row][parname]
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.array[self.row][indx[0]]
            # if more than one group parameter has the same name
else:
result = self.array[self.row][indx[0]].astype('f8')
for i in indx[1:]:
result += self.array[self.row][i]
return result
def setpar(self, parname, value):
"""
Set the group parameter value.
"""
# TODO: It would be nice if, instead of requiring a multi-part value to
# be an array, there were an *option* to automatically split the value
# into multiple columns if it doesn't already fit in the array data
# type.
if _is_int(parname):
self.array[self.row][parname] = value
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
self.array[self.row][indx[0]] = value
            # if more than one group parameter has the same name, the
            # value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and \
len(indx) == len(value):
for i in range(len(indx)):
self.array[self.row][indx[i]] = value[i]
else:
raise ValueError('Parameter value must be a sequence with '
'{} arrays/numbers.'.format(len(indx)))
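# Usage sketch for Group.par/Group.setpar (illustrative only; assumes 'gdata'
# is an existing GroupData instance with a 'UU' group parameter):
#
#     group = gdata[0]            # one Group (row) of the random groups data
#     uu = group.par('UU')        # read a group parameter by name (or index)
#     group.setpar('UU', uu * 2)  # write it back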
class GroupData(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(cls, input=None, bitpix=None, pardata=None, parnames=[],
bscale=None, bzero=None, parbscales=None, parbzeros=None):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of array
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = [f'PAR{idx + 1}' for idx in range(npars)]
if len(parnames) != npars:
raise ValueError('The number of parameter data arrays does '
'not match the number of parameters.')
unique_parnames = _unique_parnames(parnames + ['DATA'])
if bitpix is None:
bitpix = DTYPE2BITPIX[input.dtype.name]
fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = f'{str(input.shape[1:])}{format}'
formats = ','.join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [Column(name=unique_parnames[idx], format=fits_fmt,
bscale=parbscales[idx], bzero=parbzeros[idx])
for idx in range(npars)]
cols.append(Column(name=unique_parnames[-1], format=fits_fmt,
bscale=bscale, bzero=bzero))
coldefs = ColDefs(cols)
self = FITS_rec.__new__(cls,
np.rec.array(None,
formats=formats,
names=coldefs.names,
shape=gcount))
# By default the data field will just be 'DATA', but it may be
# uniquified if 'DATA' is already used by one of the group names
self._data_field = unique_parnames[-1]
self._coldefs = coldefs
self.parnames = parnames
for idx, name in enumerate(unique_parnames[:-1]):
column = coldefs[idx]
# Note: _get_scale_factors is used here and in other cases
# below to determine whether the column has non-default
# scale/zero factors.
# TODO: Find a better way to do this than using this interface
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(name, pardata[idx])
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
column = coldefs[self._data_field]
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(self._data_field, input)
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super().__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
"""
The raw group data represented as a multi-dimensional `numpy.ndarray`
array.
"""
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
result = self.field(parname)
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.field(indx[0])
            # if more than one group parameter has the same name
else:
result = self.field(indx[0]).astype('f8')
for i in indx[1:]:
result += self.field(i)
return result
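# Construction sketch for GroupData/GroupsHDU (illustrative only; the array
# shapes and parameter names are made up):
#
#     import numpy as np
#     from astropy.io import fits
#     imdata = np.zeros((10, 1, 1, 4, 4), dtype=np.float32)  # 10 groups
#     pdata = [np.arange(10, dtype=np.float32)] * 3
#     gdata = fits.GroupData(imdata, bitpix=-32,
#                            parnames=['UU', 'VV', 'WW'], pardata=pdata)
#     ghdu = fits.GroupsHDU(gdata)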
class GroupsHDU(PrimaryHDU, _TableLikeHDU):
"""
FITS Random Groups HDU class.
See the :ref:`astropy:random-groups` section in the Astropy documentation
for more details on working with this type of HDU.
"""
_bitpix2tform = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'}
_data_type = GroupData
_data_field = 'DATA'
"""
The name of the table record array field that will contain the group data
for each group; 'DATA' by default, but may be preceded by any number of
underscores if 'DATA' is already a parameter name
"""
def __init__(self, data=None, header=None):
super().__init__(data=data, header=header)
if data is not DELAYED:
self.update_header()
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
self._header['NAXIS'] = 1
self._header.set('NAXIS1', 0, after='NAXIS')
@classmethod
def match_header(cls, header):
keyword = header.cards[0].keyword
return (keyword == 'SIMPLE' and 'GROUPS' in header and
header['GROUPS'] is True)
@lazyproperty
def data(self):
"""
The data of a random group FITS file will be like a binary table's
data.
"""
if self._axes == [0]:
return
data = self._get_tbdata()
data._coldefs = self.columns
data.parnames = self.parnames
del self.columns
return data
@lazyproperty
def parnames(self):
"""The names of the group parameters as described by the header."""
pcount = self._header['PCOUNT']
# The FITS standard doesn't really say what to do if a parname is
# missing, so for now just assume that won't happen
return [self._header['PTYPE' + str(idx + 1)] for idx in range(pcount)]
@lazyproperty
def columns(self):
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
format = self._bitpix2tform[self._header['BITPIX']]
pcount = self._header['PCOUNT']
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
bscales.append(self._header.get('PSCAL' + str(idx + 1), None))
bzeros.append(self._header.get('PZERO' + str(idx + 1), None))
parnames.append(self._header['PTYPE' + str(idx + 1)])
formats = [format] * len(parnames)
dim = [None] * len(parnames)
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
parnames.append('DATA')
bscales.append(self._header.get('BSCALE'))
        bzeros.append(self._header.get('BZERO'))
data_shape = self.shape[:-1]
formats.append(str(int(np.prod(data_shape))) + format)
dim.append(data_shape)
parnames = _unique_parnames(parnames)
self._data_field = parnames[-1]
cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero,
dim=dim)
for name, fmt, bscale, bzero, dim in
zip(parnames, formats, bscales, bzeros, dim)]
coldefs = ColDefs(cols)
return coldefs
@property
def _nrows(self):
if not self._data_loaded:
# The number of 'groups' equates to the number of rows in the table
# representation of the data
return self._header.get('GCOUNT', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
return 0
@property
def is_image(self):
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for idx in range(1, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
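    # Worked example of the size formula above (illustrative, not part of the
    # original module): with BITPIX = -32, GCOUNT = 10, PCOUNT = 3 and group
    # data axes NAXIS2 = 4, NAXIS3 = 5, the product of the data axes is 20,
    # so size = 32 * 10 * (3 + 20) // 8 = 920 bytes.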
def update_header(self):
old_naxis = self._header.get('NAXIS', 0)
if self._data_loaded:
if isinstance(self.data, GroupData):
self._axes = list(self.data.data.shape)[1:]
self._axes.reverse()
self._axes = [0] + self._axes
field0 = self.data.dtype.names[0]
field0_code = self.data.dtype.fields[field0][0].name
elif self.data is None:
self._axes = [0]
field0_code = 'uint8' # For lack of a better default
else:
raise ValueError('incorrect array type')
self._header['BITPIX'] = DTYPE2BITPIX[field0_code]
self._header['NAXIS'] = len(self._axes)
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set('NAXIS' + str(idx + 1), axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
if self._has_data and isinstance(self.data, GroupData):
self._header.set('GROUPS', True,
after='NAXIS' + str(len(self._axes)))
self._header.set('PCOUNT', len(self.data.parnames), after='GROUPS')
self._header.set('GCOUNT', len(self.data), after='PCOUNT')
column = self.data._coldefs[self._data_field]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set('BSCALE', column.bscale)
if zero:
self._header.set('BZERO', column.bzero)
for idx, name in enumerate(self.data.parnames):
self._header.set('PTYPE' + str(idx + 1), name)
column = self.data._coldefs[idx]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set('PSCAL' + str(idx + 1), column.bscale)
if zero:
self._header.set('PZERO' + str(idx + 1), column.bzero)
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
we have to get the data's byte order a different way...
TODO: Might be nice to store some indication of the data's byte order
as an attribute or function so that we don't have to do this.
"""
size = 0
if self.data is not None:
self.data._scale_back()
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f'>i{self.data.dtype.itemsize}')
should_swap = False
else:
output = self.data
fname = self.data.dtype.names[0]
byteorder = self.data.dtype.fields[fname][0].str[0]
should_swap = (byteorder in swap_types)
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify locations and values of mandatory keywords.
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 1 <= v <= 999), 1,
option, errs)
self.req_cards('NAXIS1', 3, lambda v: (_is_int(v) and v == 0), 0,
option, errs)
after = self._header['NAXIS'] + 3
pos = lambda x: x >= after
self.req_cards('GCOUNT', pos, _is_int, 1, option, errs)
self.req_cards('PCOUNT', pos, _is_int, 0, option, errs)
self.req_cards('GROUPS', pos, lambda v: (v is True), True, option,
errs)
return errs
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
# TODO: Maybe check this on a per-field basis instead of assuming
# that all fields have the same byte order?
byteorder = \
self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]
if byteorder != '>':
if self.data.flags.writeable:
byteswapped = True
d = self.data.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = self.data.byteswap(False)
d.dtype = d.dtype.newbyteorder('>')
byteswapped = False
else:
byteswapped = False
d = self.data
byte_data = d.view(type=np.ndarray, dtype=np.ubyte)
cs = self._compute_checksum(byte_data)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped:
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _summary(self):
summary = super()._summary()
name, ver, classname, length, shape, format, gcount = summary
# Drop the first axis from the shape
if shape:
shape = shape[1:]
if shape and all(shape):
# Update the format
format = self.columns[0].dtype.name
# Update the GCOUNT report
gcount = f'{self._gcount} Groups {self._pcount} Parameters'
return (name, ver, classname, length, shape, format, gcount)
def _par_indices(names):
"""
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique
def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = '_' + name
name_upper = '_' + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names
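# Behavior sketch for the two helpers above (illustrative only):
#
#     _par_indices(['UU', 'VV', 'uu'])          # -> {'UU': [0, 2], 'VV': [1]}
#     _unique_parnames(['DATA', 'UU', 'DATA'])  # -> ['DATA', 'UU', '_DATA']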
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
_AsciiColDefs, _FormatP, _FormatQ, _makep,
_parse_tformat, _scalar_to_format, _convert_format,
_cmp_recformats)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = ' '
lineterminator = '\n'
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(cls, columns, header=None, nrows=0, fill=False,
character_as_bytes=False, **kwargs):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
        `numpy.ndarray` or `numpy.recarray` object), return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object with which to instantiate the new HDU.
            Header keywords specifically related to defining the table
            structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, fill all cells with zeros or blanks. If `False`, copy
            the data from the input; undefined cells will still be filled
            with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,
character_as_bytes=character_as_bytes)
hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)
coldefs._add_listener(hdu)
return hdu
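    # Usage sketch for from_columns (illustrative only; the column names and
    # values are made up):
    #
    #     from astropy.io import fits
    #     c1 = fits.Column(name='target', format='20A',
    #                      array=['NGC1001', 'NGC1002'])
    #     c2 = fits.Column(name='flux', format='E', array=[1.1, 2.2])
    #     hdu = fits.BinTableHDU.from_columns([c1, c2])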
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
self._data_offset)
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data = raw_data[:tbsize].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder('>')
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap offset (data location), needed for P format columns
data._heapoffset = self._theap
data._heapsize = self._header['PCOUNT']
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError('No header to setup HDU.')
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
('XTENSION', self._extension, self._ext_comment),
('BITPIX', 8, 'array data type'),
('NAXIS', 2, 'number of array dimensions'),
('NAXIS1', 0, 'length of dimension 1'),
('NAXIS2', 0, 'length of dimension 2'),
('PCOUNT', 0, 'number of group parameters'),
('GCOUNT', 1, 'number of groups'),
('TFIELDS', 0, 'number of table fields')]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
                # may get modified. The data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ', '.join(x + 'n' for x in sorted(future_ignore))
warnings.warn("The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys), AstropyDeprecationWarning)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header['NAXIS1'] = self.data._raw_itemsize
self._header['NAXIS2'] = self.data.shape[0]
self._header['TFIELDS'] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ','.join(self.columns._recformats)
data = np.rec.array(None, formats=formats,
names=self.columns.names,
shape=0)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if 'data' in self.__dict__:
self.columns._remove_listener(self.__dict__['data'])
self.__dict__['data'] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get('NAXIS2', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header['NAXIS1'] * self._header['NAXIS2']
return self._header.get('THEAP', size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')
self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')
self._header.set('TFIELDS', len(self.columns), after='GCOUNT')
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(),
header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(
update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header['TFIELDS'] = len(self.data._coldefs)
self._header['NAXIS2'] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
heapstart = self._header.get('THEAP', tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header['PCOUNT'] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option='warn'):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if not (isinstance(self._header[0], str) and
self._header[0].rstrip() == self._extension):
err_text = 'The XTENSION keyword must match the HDU type.'
fix_text = f'Converted the XTENSION keyword to {self._extension}.'
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)
self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)
self.req_cards('TFIELDS', 7,
lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,
option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TFORM' + str(idx + 1), None, None, None, option,
errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header['NAXIS2']
ncols = self._header['TFIELDS']
format = ', '.join([self._header['TFORM' + str(j + 1)]
for j in range(ncols)])
format = f'[{format}]'
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(self, column, col_idx, attr,
old_value, new_value):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(before_keyword, (keyword, new_value),
after=True)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword,
(keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
        Otherwise, keywords for all columns are cleared.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
continue
num = int(match.group('num')) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword,
num))
# First delete
rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),
reverse=True)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value,
old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if 'TFIELDS' in self._header:
self._header['TFIELDS'] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = 'TABLE'
_ext_comment = 'ASCII table extension'
_padding_byte = ' '
_columns_type = _AsciiColDefs
__format_RE = re.compile(
r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):
super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
        names = list(columns.names)
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = 'S' + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header['NAXIS1'] > itemsize:
data_type = 'S' + str(columns.spans[idx] +
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b' ',
dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option='warn'):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,
errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = 'BINTABLE'
_ext_comment = 'binary table extension'
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(data, header, name=name, uint=uint, ver=ver,
character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return (card.keyword == 'XTENSION' and
xtension in (cls._extension, 'A3DTABLE'))
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
if data._get_raw_data() is None:
# This block is still needed because
# test_variable_length_table_data leads to ._get_raw_data
# returning None which means _get_heap_data doesn't work.
# Which happens when the data is loaded in memory rather than
# being unloaded on disk
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
# write out the heap of variable length array columns this has
# to be done after the "regular" data is written (above)
# to avoid a bug in the lustre filesystem client, don't
# write 0-byte objects
if data._gap > 0:
fileobj.write((data._gap * '\0').encode('ascii'))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx],
_FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx)
for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
        # `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == 'U':
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i+1:])
item = np.char.encode(item, 'ascii')
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j+1:])
# Fix padding problem (see #5296).
padding = '\x00'*(field_width - item_length)
fileobj.write(padding.encode('ascii'))
_tdump_file_format = textwrap.dedent("""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
              'Q' format, due to ambiguities that are difficult to overcome. What this
means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
          definitions for one column in the table. The line is broken up into
          8 sixteen-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
""")
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
"""
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, str):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(' '.join([f"File '{f}' already exists."
for f in exist])+" If you mean to "
"replace the file(s) "
"then use the argument "
"'overwrite=True'.")
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep='\n', endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace('\n', '\n ')
def load(cls, datafile, cdfile=None, hfile=None, replace=False,
header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this object's header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
values not present in this Header, unless ``replace=True``, in which
case this Header's values are completely replaced with the values from
hfile.
Notes
-----
The primary use for the `load` method is to allow the input of table
data and parameters from ASCII files that were edited in a standard
text editor. The `dump` method can be used to create the initial ASCII
files.
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(Header.fromtextfile(hfile), update=True,
update_first=True)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace('\n', '\n ')
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
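# A minimal, hypothetical round-trip sketch for dump()/load(); the column
# definition and file names below are illustrative assumptions only:
#
#     from astropy.io import fits
#     col = fits.Column(name='flux', format='E', array=[1.0, 2.0, 3.0])
#     hdu = fits.BinTableHDU.from_columns([col])
#     hdu.dump('tbl_data.txt', 'tbl_cols.txt', 'tbl_head.txt', overwrite=True)
#     # ... edit the ASCII files in a text editor ...
#     new_hdu = fits.BinTableHDU.load('tbl_data.txt', cdfile='tbl_cols.txt',
#                                     hfile='tbl_head.txt')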
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + '.txt'
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == 'S':
itemsize = int(format[1:])
return '{:{size}}'.format(val, size=itemsize)
elif format in np.typecodes['AllInteger']:
# output integer
return f'{val:21d}'
elif format in np.typecodes['Complex']:
return f'{val.real:21.15g}+{val.imag:.15g}j'
elif format in np.typecodes['Float']:
# output floating point
return f'{val:#21.15g}'
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append('VLA_Length=')
line.append(f'{len(row[column.name]):21d}')
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == 'V':
array_format = dtype.base.char
if array_format == 'S':
array_format += str(dtype.itemsize)
if dtype.char == 'V':
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name],
array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero']
line += ['{!s:16s}'.format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)]
fileobj.write(' '.join(line))
fileobj.write('\n')
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == 'VLA_Length=':
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)):]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY['L']:
return bool(int(val))
elif recformats[col] == FITS2NUMPY['M']:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == 'VLA_Length=':
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx:idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [format_value(col, val)
for val in line[slice_]]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ['name', 'format', 'disp', 'unit', 'dim']:
kwargs[key] = words.pop(0).replace('""', '')
for key in ['null', 'bscale', 'bzero']:
word = words.pop(0).replace('""', '')
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str with be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (not isinstance(c, chararray.chararray) and
c.itemsize > 1 and c.dtype.str[0] in swap_types):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({'names': names,
'formats': formats,
'offsets': offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
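# Hedged usage sketch for the context manager above (it is an internal
# helper; ``hdu`` and ``fileobj`` below are assumed to be a BinTableHDU
# with loaded data and an open astropy _File, respectively):
#
#     with _binary_table_byte_swap(hdu.data) as swapped:
#         fileobj.writearray(swapped)  # columns are big-endian inside the block
#     # byte order and dtype are restored when the block exits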
|
6504816f5e310d4aadf1f8c5405c48df53c8875a4815851b2e76775309929c09 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
from .base import (register_hdu, unregister_hdu, DELAYED, BITPIX2DTYPE,
DTYPE2BITPIX)
from .compressed import CompImageHDU
from .groups import GroupsHDU, GroupData, Group
from .hdulist import HDUList
from .image import PrimaryHDU, ImageHDU
from .nonstandard import FitsHDU
from .streaming import StreamingHDU
from .table import TableHDU, BinTableHDU
__all__ = ['HDUList', 'PrimaryHDU', 'ImageHDU', 'TableHDU', 'BinTableHDU',
'GroupsHDU', 'GroupData', 'Group', 'CompImageHDU', 'FitsHDU',
'StreamingHDU', 'register_hdu', 'unregister_hdu', 'DELAYED',
'BITPIX2DTYPE', 'DTYPE2BITPIX']
|
5a2a18229a922e64f4778406ebcf37051944ff4e4522dd629536968596e338a3 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
from astropy.io.fits.file import _File
from .base import NonstandardExtHDU
from .hdulist import HDUList
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import fileobj_name
from astropy.utils import lazyproperty
class FitsHDU(NonstandardExtHDU):
"""
A non-standard extension HDU for encapsulating entire FITS files within a
single HDU of a container FITS file. These HDUs have an extension (that is
an XTENSION keyword) of FITS.
The FITS file contained in the HDU's data can be accessed by the `hdulist`
attribute which returns the contained FITS file as an `HDUList` object.
"""
_extension = 'FITS'
@lazyproperty
def hdulist(self):
self._file.seek(self._data_offset)
fileobj = io.BytesIO()
# Read the data into a BytesIO--reading directly from the file
# won't work (at least for gzipped files) due to problems deep
# within the gzip module that make it difficult to read gzip files
# embedded in another file
fileobj.write(self._file.read(self.size))
fileobj.seek(0)
if self._header['COMPRESS']:
fileobj = gzip.GzipFile(fileobj=fileobj)
return HDUList.fromfile(fileobj, mode='readonly')
@classmethod
def fromfile(cls, filename, compress=False):
"""
Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
disk.
Parameters
----------
filename : str
The path to the file to read into a FitsHDU
compress : bool, optional
Gzip compress the FITS file
"""
with HDUList.fromfile(filename) as hdulist:
return cls.fromhdulist(hdulist, compress=compress)
@classmethod
def fromhdulist(cls, hdulist, compress=False):
"""
Creates a new FitsHDU from a given HDUList object.
Parameters
----------
hdulist : HDUList
A valid HDUList object.
compress : bool, optional
Gzip compress the FITS file
"""
"""
fileobj = bs = io.BytesIO()
if compress:
if hasattr(hdulist, '_file'):
name = fileobj_name(hdulist._file)
else:
name = None
fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs)
hdulist.writeto(fileobj)
if compress:
fileobj.close()
# A proper HDUList should still be padded out to a multiple of 2880
# technically speaking
padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii')
bs.write(padding)
bs.seek(0)
cards = [
('XTENSION', cls._extension, 'FITS extension'),
('BITPIX', 8, 'array data type'),
('NAXIS', 1, 'number of array dimensions'),
('NAXIS1', len(bs.getvalue()), 'Axis length'),
('PCOUNT', 0, 'number of parameters'),
('GCOUNT', 1, 'number of groups'),
]
# Add the XINDn keywords proposed by Perry, though nothing is done with
# these at the moment
if len(hdulist) > 1:
for idx, hdu in enumerate(hdulist[1:]):
cards.append(('XIND' + str(idx + 1), hdu._header_offset,
f'byte offset of extension {idx + 1}'))
cards.append(('COMPRESS', compress, 'Uses gzip compression'))
header = Header(cards)
return cls._readfrom_internal(_File(bs), header=header)
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return xtension == cls._extension
# TODO: Add header verification
def _summary(self):
# TODO: Perhaps make this more descriptive...
return (self.name, self.ver, self.__class__.__name__, len(self._header))
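# Hedged usage sketch (file names are assumptions): embed an existing FITS
# file in a container file via FitsHDU, then recover it through ``hdulist``:
#
#     from astropy.io import fits
#     from astropy.io.fits.hdu.nonstandard import FitsHDU
#     with fits.open('inner.fits') as inner:
#         wrapper = FitsHDU.fromhdulist(inner, compress=True)
#         fits.HDUList([fits.PrimaryHDU(), wrapper]).writeto('container.fits',
#                                                            overwrite=True)
#     recovered = fits.open('container.fits')[1].hdulist  # wrapped HDUList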
|
15b2c3a78a923a2ebed91581b705605f23aec1056c22accd481b26d6703626cf | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import mmap
import warnings
import numpy as np
from .base import DELAYED, _ValidHDU, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX
from astropy.io.fits.header import Header
from astropy.io.fits.util import (_is_pseudo_integer, _pseudo_zero, _is_int,
_is_dask_array)
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
'SIMPLE': 'conforms to FITS standard',
'XTENSION': 'Image extension',
'BITPIX': 'array data type',
'NAXIS': 'number of array dimensions',
'GROUPS': 'has groups',
'PCOUNT': 'number of parameters',
'GCOUNT': 'number of groups'
}
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
uint=True, scale_back=False, ignore_blank=False, **kwargs):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError('No header to setup HDU.')
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ('XTENSION', 'IMAGE',
self.standard_keyword_comments['XTENSION'])
else:
c0 = ('SIMPLE', True, self.standard_keyword_comments['SIMPLE'])
cards = [
c0,
('BITPIX', 8, self.standard_keyword_comments['BITPIX']),
('NAXIS', 0, self.standard_keyword_comments['NAXIS'])]
if isinstance(self, GroupsHDU):
cards.append(('GROUPS', True,
self.standard_keyword_comments['GROUPS']))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(('PCOUNT', 0,
self.standard_keyword_comments['PCOUNT']))
cards.append(('GCOUNT', 1,
self.standard_keyword_comments['GCOUNT']))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = 'BZERO' in self._header
bscale_in_header = 'BSCALE' in self._header
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [self._header.get('NAXIS' + str(axis + 1), 0)
for axis in range(self._header.get('NAXIS', 0))]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get('BITPIX')
self._gcount = self._header.get('GCOUNT', 1)
self._pcount = self._header.get('PCOUNT', 0)
self._blank = None if ignore_blank else self._header.get('BLANK')
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get('BLANK')
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if 'name' in kwargs and kwargs['name']:
self.name = kwargs['name']
if 'ver' in kwargs and kwargs['ver']:
self.ver = kwargs['ver']
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if (not do_not_scale_image_data and
(self._bscale != 1 or self._bzero != 0)):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get('BITPIX')
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get('BZERO')
self._bscale = self._header.get('BSCALE')
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
return the appropriate slice of the data, and loads *only* that section
into memory.
Sections are mostly obsoleted by memmap support, but should still be
used to deal with very large scaled images. See the
:ref:`astropy:data-sections` section of the Astropy documentation for
more details.
"""
return Section(self)
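# Hedged usage sketch for ``section`` (file name and slice bounds are
# assumptions): read a sub-rectangle of a large image without loading the
# full array into memory:
#
#     from astropy.io import fits
#     with fits.open('big_image.fits') as hdul:
#         cutout = hdul[0].section[100:200, 250:350]  # shape (100, 100)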
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
Please remember that the order of axes on a Numpy array is opposite
of the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__ and self.__dict__['data'] is not None:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_integer(self.__dict__['data'].dtype)
else:
self._data_replaced = True
was_unsigned = False
if (data is not None
and not isinstance(data, np.ndarray)
and not _is_dask_array(data)):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception:
raise TypeError('data object {!r} could not be coerced into an '
'ndarray'.format(data))
if data.shape == ():
raise TypeError('data object {!r} should have at least one '
'dimension'.format(data))
self.__dict__['data'] = data
self._modified = True
if self.data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if (data is not None and was_unsigned):
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
# will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (self._modified or self._header._modified or
(self._has_data and self.shape != self.data.shape)):
# Not likely that anything needs updating
return
old_naxis = self._header.get('NAXIS', 0)
if 'BITPIX' not in self._header:
bitpix_comment = self.standard_keyword_comments['BITPIX']
else:
bitpix_comment = self._header.comments['BITPIX']
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set('BITPIX', self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if 'NAXIS' in self._header:
naxis_comment = self._header.comments['NAXIS']
else:
naxis_comment = self.standard_keyword_comments['NAXIS']
self._header.set('NAXIS', len(self._axes), naxis_comment,
after='BITPIX')
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = 'NAXIS' + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
if 'BLANK' in self._header:
self._blank = self._header['BLANK']
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_pseudo_int_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if (self._do_not_scale_image_data or
(self._orig_bzero == 0 and self._orig_bscale == 1)):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (dtype is not None and dtype.kind == 'u' and
(self._scale_back or self._scale_back is None)):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ['BSCALE', 'BZERO']:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header['BITPIX'] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header['BITPIX']
self._blank = self._header.pop('BLANK', None)
def scale(self, type=None, option='old', bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Call to this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name (e.g. ``'uint8'``, ``'int16'``, ``'float32'``,
etc.). If `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
# Disable blank support for now
self._scale_internal(type=type, option=option, bscale=bscale,
bzero=bzero, blank=None)
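# Hedged usage sketch for ``scale`` (the chosen type and BZERO value are
# assumptions); call it just before writing, since it modifies the data:
#
#     hdu.scale('int16', bzero=32768)       # store as pseudo-unsigned 16-bit
#     # or derive BSCALE/BZERO from the data range instead:
#     # hdu.scale('int16', option='minmax')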
def _scale_internal(self, type=None, option='old', bscale=None, bzero=None,
blank=0):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (option == 'old' and self._orig_bscale is not None and
self._orig_bzero is not None):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax' and not issubclass(_type, np.floating):
if _is_dask_array(self.data):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0 ** 8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0 ** nbytes - 2)
else:
_scale = 1
_zero = 0
# Do the scaling
if _zero != 0:
if _is_dask_array(self.data):
self.data = self.data - _zero
else:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting='unsafe')
self._header['BZERO'] = _zero
else:
try:
del self._header['BZERO']
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header['BSCALE'] = _scale
else:
try:
del self._header['BSCALE']
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header['BLANK'] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._blank = blank
self._header['BITPIX'] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option='warn'):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
# TODO: Once the FITSSchema framework is merged these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank))
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU.")
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(BITPIX2DTYPE[self._orig_bitpix],
blank=self._orig_blank)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif _is_dask_array(self.data):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f'>i{self.data.dtype.itemsize}')
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = (byteorder in swap_types)
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = (byteorder in swap_types)
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(M.newbyteorder, "S")
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b'\0')
fileobj.flush()
if fileobj.fileobj_mode not in ('rb+', 'wb+', 'ab+'):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode='rb+')
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(fp.fileno(),
length=initial_position + n_bytes,
access=mmap.ACCESS_WRITE)
outarr = np.ndarray(shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
if bitpix == 8 and self._orig_bzero == -128:
return np.dtype('int8')
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
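# Worked examples of the mapping above, assuming uint=True and
# _orig_bscale == 1:
#   BITPIX=8,  BZERO=-128         -> int8
#   BITPIX=16, BZERO=32768        -> uint16
#   BITPIX=32, BZERO=2147483648   -> uint32
#   any other scaled integer case -> float64 if BITPIX > 16, else float32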
def _convert_pseudo_integer(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
In this case, we don't need to handle BLANK to convert it to NaN,
since we can't do NaNs with integers, anyway, i.e. the user is
responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == 'u':
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
Internal function for reading image data from a file and apply scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder('>')
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and
self._blank is None):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError("Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False.")
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_integer(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ''
else:
format = self.data.dtype.name
format = format[format.rfind('.')+1:]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ''
if (format and not self._do_not_scale_image_data and
(self._orig_bscale != 1 or self._orig_bzero != 0)):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f' (rescales to {new_dtype.name})'
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, '')
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_integer(self.data.dtype):
d = np.array(self.data - _pseudo_zero(self.data.dtype),
dtype=f'i{self.data.dtype.itemsize}')
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != '>':
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder('>')
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_integer(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
class Section:
"""
Image section.
Slices of this object load the corresponding section of an image array from
the underlying FITS file on disk, and applies any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (all(isinstance(k, (int, np.integer)) for k in key)
and len(key) == naxis)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += Ellipsis,
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError('too many indices for array')
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx+1:]
return_0dim = (all(isinstance(k, (int, np.integer)) for k in key)
and len(key) == naxis)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1:]] for k in ks]
if any(isinstance(key, slice) or isiterable(key)
for key in keys[idx + 1:]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = 'PRIMARY'
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
ignore_blank=False,
uint=True, scale_back=None):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
"""
super().__init__(
data=data, header=header,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back)
# insert the keywords EXTEND
if header is None:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
self._header.set('EXTEND', True, after='NAXIS' + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (card.keyword == 'SIMPLE' and
('GROUPS' not in header or header['GROUPS'] != True) and # noqa
card.value)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if 'EXTEND' in self._header:
naxis = self._header.get('NAXIS', 0)
self.req_cards('EXTEND', naxis + 3, lambda v: isinstance(v, bool),
True, option, errs)
return errs
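# Hedged construction sketch (array contents and file name are assumptions):
# a minimal primary HDU plus an image extension written to a new file:
#
#     import numpy as np
#     from astropy.io import fits
#     primary = fits.PrimaryHDU(data=np.zeros((10, 10), dtype=np.float32))
#     image = fits.ImageHDU(data=np.arange(100).reshape(10, 10), name='SCI')
#     fits.HDUList([primary, image]).writeto('example.fits', overwrite=True)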
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = 'IMAGE'
def __init__(self, data=None, header=None, name=None,
do_not_scale_image_data=False, uint=True, scale_back=None,
ver=None):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data, header=header, name=name,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
scale_back=scale_back, ver=ver)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _verify(self, option='warn'):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get('NAXIS', 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v == 0),
0, option, errs)
return errs
class _IndexInfo:
def __init__(self, indx, naxis):
if _is_int(indx):
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f'Index {indx} out of range.')
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f'Illegal index {indx}')
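# Worked example of the bookkeeping above (values are illustrative): for an
# axis of length 12, slice(2, 10, 2) yields npts=(10-2)//2=4, offset=2,
# contiguous=False; the integer index 5 yields npts=1, offset=5,
# contiguous=True.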
|
055361311d29f4c23b3ea09f45c07175e2f1ac7d34f36368ded4f724568e28a6 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from .base import DELAYED, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX
from .image import ImageHDU
from .table import BinTableHDU
from astropy.io.fits import conf
from astropy.io.fits.card import Card
from astropy.io.fits.column import Column, ColDefs, TDEF_RE
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (_is_pseudo_integer, _pseudo_zero, _is_int,
_get_array_mmap)
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
try:
from astropy.io.fits import compression
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = True
except ImportError:
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = False
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: 'NO_DITHER',
SUBTRACTIVE_DITHER_1: 'SUBTRACTIVE_DITHER_1',
SUBTRACTIVE_DITHER_2: 'SUBTRACTIVE_DITHER_2'
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = ('RICE_1', 'GZIP_1', 'GZIP_2', 'PLIO_1', 'HCOMPRESS_1')
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = 'RICE_1'
DEFAULT_QUANTIZE_LEVEL = 16.
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {'RICE_ONE': 'RICE_1'}
COMPRESSION_KEYWORDS = {'ZIMAGE', 'ZCMPTYPE', 'ZBITPIX', 'ZNAXIS', 'ZMASKCMP',
'ZSIMPLE', 'ZTENSION', 'ZEXTEND'}
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
'SIMPLE': 'ZSIMPLE', 'XTENSION': 'ZTENSION', 'BITPIX': 'ZBITPIX',
'NAXIS': 'ZNAXIS', 'EXTEND': 'ZEXTEND', 'BLOCKED': 'ZBLOCKED',
'PCOUNT': 'ZPCOUNT', 'GCOUNT': 'ZGCOUNT', 'CHECKSUM': 'ZHECKSUM',
'DATASUM': 'ZDATASUM'
}
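# Hedged example of the remapping above: assigning hdr['BITPIX'] = 16 on a
# CompImageHeader updates the wrapped image header and, through _update()
# below, also writes ZBITPIX = 16 into the underlying bintable header.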
_zdef_re = re.compile(r'(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?')
_compression_keywords = set(_keyword_remaps.values()).union(
['ZIMAGE', 'ZCMPTYPE', 'ZMASKCMP', 'ZQUANTIZ', 'ZDITHER0'])
_indexed_compression_keywords = {'ZNAXIS', 'ZTILE', 'ZNAME', 'ZVAL'}
# TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override and Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith("HIERARCH "):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False,
bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False,
after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after,
replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(card, before=remapped_before,
after=remapped_after,
replace=replace)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = ('Keyword {!r} is reserved for use by the FITS Tiled Image '
'Convention and will not be stored in the header for the '
'image being compressed.'.format(keyword))
if keyword == 'TFIELDS':
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group('label').upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group('label').upper()
num = m.group('num')
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
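# e.g. 'BITPIX' -> 'ZBITPIX', 'NAXIS2' -> 'ZNAXIS2'; keywords with no
# remapping (e.g. 'OBJECT') are returned unchanged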
is_naxisn = False
if keyword[:5] == 'NAXIS':
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f'ZNAXIS{index}'
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword,
repeat))
return idx
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_default_name = "COMPRESSED_IMAGE"
def __init__(self, data=None, header=None, name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_size=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False, scale_back=False, **kwargs):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``
tile_size : sequence of int, optional
Compression tile sizes, given as a list with one element per image
dimension. The default treats each row of the image as a tile.
tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. At this time, Astropy does not
support the ability to extract and uncompress sections of the
image without having to uncompress the entire image.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_size`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values, however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
the negative of desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` mean that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The default method, specified with the constant
``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
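Examples
--------
A minimal usage sketch; the array contents and output file name below
are purely illustrative::
    import numpy as np
    from astropy.io import fits
    data = np.arange(100.0, dtype=np.float32).reshape(10, 10)
    hdu = fits.CompImageHDU(data, compression_type='RICE_1',
                            quantize_level=16.0)
    hdu.writeto('rice_compressed.fits')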
"""
if not COMPRESSION_SUPPORTED:
# TODO: Raise a more specific Exception type
raise Exception('The astropy.io.fits.compression module is not '
'available. Creation of compressed image HDUs is '
'disabled.')
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(header, name,
compression_type=compression_type,
tile_size=tile_size,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed)
# TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [self._header.get('ZNAXIS' + str(axis + 1), 0)
for axis in range(self._header.get('ZNAXIS', 0))]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._bitpix = self._header['ZBITPIX']
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (e.g. from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if 'EXTNAME' in header:
indices = header._keyword_indices['EXTNAME']
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [index for index in indices
if header[index] == self._default_name]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self.header:
self.header['EXTNAME'] = value
else:
self.header['EXTNAME'] = (value, 'extension name')
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ('BINTABLE', 'A3DTABLE'):
return False
if 'ZIMAGE' not in header or not header['ZIMAGE']:
return False
if COMPRESSION_SUPPORTED and COMPRESSION_ENABLED:
return True
elif not COMPRESSION_SUPPORTED:
warnings.warn('Failure matching header to a compressed image '
'HDU: The compression module is not available.\n'
'The HDU will be treated as a Binary Table HDU.',
AstropyUserWarning)
return False
else:
# Compression is supported but disabled; just pass silently (#92)
return False
def _update_header_data(self, image_header,
name=None,
compression_type=None,
tile_size=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1'; if this value is `None`, use value already in the
header; if no value already in the header, use 'RICE_1'
tile_size : sequence of int, optional
compression tile sizes as a list; if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 1
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
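As a rough illustration (``new_image_array`` is a placeholder for a
numpy array of a different type), one might refresh the compressed
representation after replacing the data with::
    hdu.data = new_image_array
    hdu._update_header_data(hdu.header)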
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array for which, when compressed, the entire binary table representing
# the compressed data is larger than 4 GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
if self._has_data:
huge_hdu = self.data.nbytes > 2 ** 32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and 'EXTNAME' not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set('EXTNAME', self._default_name,
'name of this binary table extension',
after='TFIELDS')
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
'Unknown compression type provided (supported are {}). '
'Default ({}) compression will be used.'
.format(', '.join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE),
AstropyUserWarning)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set('ZCMPTYPE', compression_type,
'compression algorithm', after='TFIELDS')
else:
compression_type = self._header.get('ZCMPTYPE',
DEFAULT_COMPRESSION_TYPE)
compression_type = CMTYPE_ALIASES.get(compression_type,
compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get('BZERO', 0.0)
bscale = image_header.get('BSCALE', 1.0)
after_keyword = 'EXTNAME'
if bscale != 1.0:
self._header.set('BSCALE', bscale, after=after_keyword)
after_keyword = 'BSCALE'
if bzero != 0.0:
self._header.set('BZERO', bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments['BITPIX']
except (AttributeError, KeyError):
bitpix_comment = 'data type of original image'
try:
naxis_comment = image_header.comments['NAXIS']
except (AttributeError, KeyError):
naxis_comment = 'dimension of original image'
# Set the label for the first column in the table
self._header.set('TTYPE1', 'COMPRESSED_DATA', 'label for field 1',
after='TFIELDS')
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == 'PLIO_1':
tform1 = '1QI' if huge_hdu else '1PI'
else:
tform1 = '1QB' if huge_hdu else '1PB'
self._header.set('TFORM1', tform1,
'data format of field: variable length array',
after='TTYPE1')
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header['TTYPE1'], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header['BITPIX']
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
# CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
# column to store floating point data that couldn't be quantized, instead
# of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
ttype2 = 'GZIP_COMPRESSED_DATA'
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = '1QB' if huge_hdu else '1PB'
# Set up the second column for the table that will hold any
# uncompressable data.
self._header.set('TTYPE2', ttype2, 'label for field 2',
after='TFORM1')
self._header.set('TFORM2', tform2,
'data format of field: variable length array',
after='TTYPE2')
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set('TTYPE3', 'ZSCALE', 'label for field 3',
after='TFORM2')
self._header.set('TFORM3', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE3')
col3 = Column(name=self._header['TTYPE3'],
format=self._header['TFORM3'])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set('TTYPE4', 'ZZERO', 'label for field 4',
after='TFORM3')
self._header.set('TFORM4', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE4')
after = 'TFORM4'
col4 = Column(name=self._header['TTYPE4'],
format=self._header['TFORM4'])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = 'TFORM1'
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ['TTYPE2', 'TFORM2', 'TTYPE3', 'TFORM3', 'TTYPE4',
'TFORM4']
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set('NAXIS1', cols.dtype.itemsize,
'width of table in bytes')
self._header.set('TFIELDS', ncols, 'number of fields in each row',
after='GCOUNT')
self._header.set('ZIMAGE', True, 'extension contains compressed image',
after=after)
self._header.set('ZBITPIX', zbitpix,
bitpix_comment, after='ZIMAGE')
self._header.set('ZNAXIS', self._image_header['NAXIS'], naxis_comment,
after='ZBITPIX')
# Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header['ZNAXIS' + str(idx)]
del self._header['ZTILE' + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header['NAXIS']
if not tile_size:
tile_size = []
elif len(tile_size) != naxis:
warnings.warn('Provided tile size not appropriate for the data. '
'Default tile size will be used.', AstropyUserWarning)
tile_size = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == 'HCOMPRESS_1':
if (self._image_header['NAXIS1'] < 4 or
self._image_header['NAXIS2'] < 4):
raise ValueError('Hcompress minimum image dimension is '
'4 pixels')
elif tile_size:
if tile_size[0] < 4 or tile_size[1] < 4:
# user specified tile size is too small
raise ValueError('Hcompress minimum tile dimension is '
'4 pixels')
major_dims = len([ts for ts in tile_size if ts > 1])
if major_dims > 2:
raise ValueError(
'HCOMPRESS can only support 2-dimensional tile sizes. '
'All but two of the tile_size dimensions must be set '
'to 1.')
if tile_size and (tile_size[0] == 0 and tile_size[1] == 0):
# compress the whole image as a single tile
tile_size[0] = self._image_header['NAXIS1']
tile_size[1] = self._image_header['NAXIS2']
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size[i] = 1
elif not tile_size:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
# efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_size.append(self._image_header['NAXIS1'])
if self._image_header['NAXIS2'] <= 30:
tile_size.append(self._image_header['NAXIS2'])
else:
# look for another good tile dimension
naxis2 = self._image_header['NAXIS2']
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_size.append(dim)
break
else:
tile_size.append(17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size.append(1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header['NAXIS1'] % tile_size[0] # 1st dimen
if remain > 0 and remain < 4:
tile_size[0] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS1'] % tile_size[0]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 1st dimension has '
'less than 4 pixels')
remain = self._image_header['NAXIS2'] % tile_size[1] # 2nd dimen
if remain > 0 and remain < 4:
tile_size[1] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS2'] % tile_size[1]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 2nd dimension has '
'less than 4 pixels')
# Set up locations for writing the next cards in the header.
last_znaxis = 'ZNAXIS'
if self._image_header['NAXIS'] > 0:
after1 = 'ZNAXIS1'
else:
after1 = 'ZNAXIS'
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 0
for idx, axis in enumerate(self._axes):
naxis = 'NAXIS' + str(idx + 1)
znaxis = 'ZNAXIS' + str(idx + 1)
ztile = 'ZTILE' + str(idx + 1)
if tile_size and len(tile_size) >= idx + 1:
ts = tile_size[idx]
else:
if ztile not in self._header:
# Default tile size
if not idx:
ts = self._image_header['NAXIS1']
else:
ts = 1
else:
ts = self._header[ztile]
tile_size.append(ts)
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= ((axis - 1) // ts + 1)
if image_header and naxis in image_header:
self._header.set(znaxis, axis, image_header.comments[naxis],
after=last_znaxis)
else:
self._header.set(znaxis, axis,
'length of original image axis',
after=last_znaxis)
self._header.set(ztile, ts, 'size of tiles to be compressed',
after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set('NAXIS2', nrows, 'number of rows in table')
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
if self._header[zname] == 'NOISEBIT':
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == 'SCALE ':
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == 'SMOOTH ':
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = 'ZCMPTYPE'
idx = 1
if compression_type == 'RICE_1':
self._header.set('ZNAME1', 'BLOCKSIZE', 'compression block size',
after=after_keyword)
self._header.set('ZVAL1', DEFAULT_BLOCK_SIZE, 'pixels per block',
after='ZNAME1')
self._header.set('ZNAME2', 'BYTEPIX',
'bytes per pixel (1, 2, 4, or 8)', after='ZVAL1')
if self._header['ZBITPIX'] == 8:
bytepix = 1
elif self._header['ZBITPIX'] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set('ZVAL2', bytepix,
'bytes per pixel (1, 2, 4, or 8)',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
elif compression_type == 'HCOMPRESS_1':
self._header.set('ZNAME1', 'SCALE', 'HCOMPRESS scale factor',
after=after_keyword)
self._header.set('ZVAL1', hcomp_scale, 'HCOMPRESS scale factor',
after='ZNAME1')
self._header.set('ZNAME2', 'SMOOTH', 'HCOMPRESS smooth option',
after='ZVAL1')
self._header.set('ZVAL2', hcomp_smooth, 'HCOMPRESS smooth option',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
if self._image_header['BITPIX'] < 0: # floating point image
self._header.set('ZNAME' + str(idx), 'NOISEBIT',
'floating point quantization level',
after=after_keyword)
self._header.set('ZVAL' + str(idx), quantize_level,
'floating point quantization level',
after='ZNAME' + str(idx))
# Add the dither method and seed
if quantize_method:
if quantize_method not in [NO_DITHER, SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn('Unknown quantization method provided. '
'Default method ({}) used.'.format(name))
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = 'No dithering during quantization'
else:
zquantiz_comment = 'Pixel Quantization Algorithm'
self._header.set('ZQUANTIZ',
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after='ZVAL' + str(idx))
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get('ZQUANTIZ', NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if 'ZDITHER0' in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header['ZDITHER0']
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif 'ZDITHER0' in self._header:
dither_seed = self._header['ZDITHER0']
else:
dither_seed = self._generate_dither_seed(
DEFAULT_DITHER_SEED)
self._header.set('ZDITHER0', dither_seed,
'dithering offset when quantizing floats',
after='ZQUANTIZ')
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if 'SIMPLE' in image_header:
self._header.set('ZSIMPLE', image_header['SIMPLE'],
image_header.comments['SIMPLE'],
before='ZBITPIX')
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if 'EXTEND' in image_header:
self._header.set('ZEXTEND', image_header['EXTEND'],
image_header.comments['EXTEND'])
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if 'BLOCKED' in image_header:
self._header.set('ZBLOCKED', image_header['BLOCKED'],
image_header.comments['BLOCKED'])
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in image_header:
self._header.set('ZTENSION', 'IMAGE',
image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in image_header:
self._header.set('ZPCOUNT', image_header['PCOUNT'],
image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in image_header:
self._header.set('ZGCOUNT', image_header['GCOUNT'],
image_header.comments['GCOUNT'],
after='ZPCOUNT')
# Move CHECKSUM and DATASUM cards from the image header to the
# table header as ZHECKSUM and ZDATASUM cards.
if 'CHECKSUM' in image_header:
self._header.set('ZHECKSUM', image_header['CHECKSUM'],
image_header.comments['CHECKSUM'])
if 'DATASUM' in image_header:
self._header.set('ZDATASUM', image_header['DATASUM'],
image_header.comments['DATASUM'])
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in self._image_header:
self._header.set('ZTENSION', 'IMAGE',
self._image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in self._image_header:
self._header.set('ZPCOUNT', self._image_header['PCOUNT'],
self._image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in self._image_header:
self._header.set('ZGCOUNT', self._image_header['GCOUNT'],
self._image_header.comments['GCOUNT'],
after='ZPCOUNT')
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if 'ZHECKSUM' in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
@lazyproperty
def data(self):
# The data attribute is the image data (not the table data).
data = compression.decompress_hdu(self)
if data is None:
return data
# Scale the data if necessary
if (self._orig_bzero != 0 or self._orig_bscale != 1):
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
zblank = None
if 'ZBLANK' in self.compressed_data.columns.names:
zblank = self.compressed_data['ZBLANK']
else:
if 'ZBLANK' in self._header:
zblank = np.array(self._header['ZBLANK'], dtype='int32')
elif 'BLANK' in self._header:
zblank = np.array(self._header['BLANK'], dtype='int32')
if zblank is not None:
blanks = (data == zblank)
if self._bscale != 1:
np.multiply(data, self._bscale, data)
if self._bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._bzero, out=data, casting='unsafe')
if zblank is not None:
data = np.where(blanks, np.nan, data)
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (not isinstance(data, np.ndarray) or
data.dtype.fields is not None):
raise TypeError('CompImageHDU data has incorrect type: {}; '
'dtype.fields = {}'.format(
type(data), data.dtype.fields))
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__['data']
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if 'compressed_data' in self.__dict__:
del self.__dict__['compressed_data']._coldefs
# Now go ahead and delete from self.__dict__; normally
# lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__['compressed_data']
# If this file was mmap'd, numpy.memmap will hold open a file
# handle until the underlying mmap object is garbage-collected;
# since this reference leak can sometimes hang around longer than
# welcome, go ahead and force a garbage collection
gc.collect()
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
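# e.g. ZNAXIS1=100 and ZNAXIS2=50 give self._axes == [100, 50] and
# therefore shape == (50, 100)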
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, '_image_header'):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
# (Note: Used set here instead of list in case there are any duplicate
# keywords, which there may be in some pathological cases:
# https://github.com/astropy/astropy/issues/2750)
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
if 'ZSIMPLE' in self._header:
image_header.set('SIMPLE', self._header['ZSIMPLE'],
self._header.comments['ZSIMPLE'], before=0)
elif 'ZTENSION' in self._header:
if self._header['ZTENSION'] != 'IMAGE':
warnings.warn("ZTENSION keyword in compressed "
"extension != 'IMAGE'", AstropyUserWarning)
image_header.set('XTENSION', 'IMAGE',
self._header.comments['ZTENSION'], before=0)
else:
image_header.set('XTENSION', 'IMAGE', before=0)
image_header.set('BITPIX', self._header['ZBITPIX'],
self._header.comments['ZBITPIX'], before=1)
image_header.set('NAXIS', self._header['ZNAXIS'],
self._header.comments['ZNAXIS'], before=2)
last_naxis = 'NAXIS'
for idx in range(image_header['NAXIS']):
znaxis = 'ZNAXIS' + str(idx + 1)
naxis = znaxis[1:]
image_header.set(naxis, self._header[znaxis],
self._header.comments[znaxis],
after=last_naxis)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header['NAXIS']
for keyword in list(image_header['NAXIS?*']):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if 'ZPCOUNT' in self._header:
image_header.set('PCOUNT', self._header['ZPCOUNT'],
self._header.comments['ZPCOUNT'],
after=last_naxis)
else:
image_header.set('PCOUNT', 0, after=last_naxis)
if 'ZGCOUNT' in self._header:
image_header.set('GCOUNT', self._header['ZGCOUNT'],
self._header.comments['ZGCOUNT'],
after='PCOUNT')
else:
image_header.set('GCOUNT', 1, after='PCOUNT')
if 'ZEXTEND' in self._header:
image_header.set('EXTEND', self._header['ZEXTEND'],
self._header.comments['ZEXTEND'])
if 'ZBLOCKED' in self._header:
image_header.set('BLOCKED', self._header['ZBLOCKED'],
self._header.comments['ZBLOCKED'])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if 'ZHECKSUM' in self._header:
image_header.set('CHECKSUM', self._header['ZHECKSUM'],
self._header.comments['ZHECKSUM'])
if 'ZDATASUM' in self._header:
image_header.set('DATASUM', self._header['ZDATASUM'],
self._header.comments['ZDATASUM'])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if ('EXTNAME' in image_header and
image_header['EXTNAME'] == self._default_name):
del image_header['EXTNAME']
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ''
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind('.') + 1:]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header['NAXIS']):
_shape += (self.header['NAXIS' + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header['BITPIX']]
return (self.name, self.ver, class_name, len(self.header), _shape,
_format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f'=i{self.data.dtype.itemsize}')
should_swap = False
else:
should_swap = not self.data.dtype.isnative
if should_swap:
if self.data.flags.writeable:
self.data.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
self.data = self.data.byteswap(False)
try:
nrows = self._header['NAXIS2']
tbsize = self._header['NAXIS1'] * nrows
self._header['PCOUNT'] = 0
if 'THEAP' in self._header:
del self._header['THEAP']
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Make sure that the data is contiguous otherwise CFITSIO
# will not write the expected data
self.data = np.ascontiguousarray(self.data)
# Compress the data.
# The current implementation of compress_hdu assumes the empty
# compressed data table has already been initialized in
# self.compressed_data, and writes directly to it
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compression.compress_hdu(self)
finally:
# if data was byteswapped return it to its original order
if should_swap:
self.data.byteswap(True)
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder('>')
buf = self.compressed_data
compressed_data = buf[:self._theap].view(dtype=dtype,
type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option='old', bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy dtype
name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If is
`None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
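Examples
--------
An illustrative sketch; ``hdu`` stands for an existing `CompImageHDU`
and the output file name is arbitrary::
    hdu.scale('int16', option='minmax')
    hdu.writeto('scaled.fits')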
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if (bscale != 1 or bzero != 0):
_scale = bscale
_zero = bzero
else:
if option == 'old':
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax':
if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2. ** 8 - 1)
else:
_zero = (_max + _min) / 2.
# throw away -2^N
_scale = (_max - _min) / (2. ** (8 * _type.bytes) - 2)
# Do the scaling
if _zero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data -= _zero, and we
# do this instead of self.data = self.data - _zero to
# avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting='unsafe')
self.header['BZERO'] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header['BZERO']
if _scale != 1:
self.data /= _scale
self.header['BSCALE'] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header['BSCALE']
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get('BZERO', 0)
self._bscale = self.header.get('BSCALE', 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header['BITPIX'] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if 'CHECKSUM' in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set('CHECKSUM',
image_hdu.header['CHECKSUM'],
image_hdu.header.comments['CHECKSUM'])
if 'DATASUM' in image_hdu.header:
self._image_header.set('DATASUM', image_hdu.header['DATASUM'],
image_hdu.header.comments['DATASUM'])
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__['data'] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, '_imagedata'):
self.__dict__['data'] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (closed and self._data_loaded and
_get_array_mmap(self.compressed_data) is not None):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
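    # For example, a header with BITPIX = 16, BSCALE = 1 and BZERO = 32768
    # satisfies the pseudo-unsigned convention and is read back as uint16.
    # A standalone sketch of the numerical check (assuming the uint option is
    # enabled; the header values are made up):
    #
    #     import numpy as np
    #     bitpix, bscale, bzero = 16, 1, 32768
    #     if bscale == 1 and bzero == 1 << (bitpix - 1):
    #         dtype = np.dtype(f'uint{bitpix}')      # np.dtype('uint16')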
def _update_header_scale_info(self, dtype=None):
if (not self._do_not_scale_image_data and
not (self._orig_bzero == 0 and self._orig_bscale == 1)):
for keyword in ['BSCALE', 'BZERO']:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header['BITPIX'] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header['BITPIX']
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed))
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
naxis = self._header['ZNAXIS']
tile_dims = [self._header[f'ZTILE{idx + 1}']
for idx in range(naxis)]
tile_dims.reverse()
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype='uint8').sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return ((sum(int(x) for x in math.modf(time.time())) + id(self)) %
10000) + 1
else:
return seed
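    # Rough standalone sketch of the DITHER_SEED_CHECKSUM branch above (the
    # tile contents are made up; only the byte-sum and modulo logic is shown):
    #
    #     import ctypes
    #     import numpy as np
    #     first_tile = np.arange(64, dtype='float32').reshape(8, 8)
    #     csum = int(first_tile.view(dtype='uint8').sum())
    #     seed = (ctypes.c_ulong(csum).value % 10000) + 1    # in 1..10000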
|
562b11c97eee636792e3f741d596fdd90cfcb813be3e45835035fa3770718d5f | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import os
import sys
import warnings
from contextlib import suppress
from inspect import signature, Parameter
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits.file import _File
from astropy.io.fits.header import (Header, _BasicHeader, _pad_length,
_DelayedHeader)
from astropy.io.fits.util import (_is_int, _is_pseudo_integer, _pseudo_zero,
itersubclasses, decode_ascii, _get_array_mmap, first,
_free_space_check, _extract_number)
from astropy.io.fits.verify import _Verify, _ErrList
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = [
"DELAYED",
# classes
"InvalidHDUException",
"ExtensionHDU",
"NonstandardExtHDU",
]
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
-32: 'float32', -64: 'float64'}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {'int8': 8, 'uint8': 8, 'int16': 16, 'uint16': 16,
'int32': 32, 'uint32': 32, 'int64': 64, 'uint64': 64,
'float32': -32, 'float64': -64}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
    It's important to be aware that the class hierarchy is traversed
    depth-first and in reverse, so the most specific (deepest) subclasses are
    tried before their base classes.  Each match_header() should identify an
    HDU type as uniquely as possible.  Abstract types may choose to simply
    return False or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (c.__module__.startswith('astropy.io.fits.') or
c in cls._hdu_registry):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
'An exception occurred matching an HDU header to the '
'appropriate HDU type: {}'.format(exc),
AstropyUserWarning)
warnings.warn('The HDU will be treated as corrupted.',
AstropyUserWarning)
klass = _CorruptedHDU
del exc
break
return klass
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU:
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = '\x00'
_default_name = ''
# _header uses a descriptor to delay the loading of the fits.Header object
# until it is necessary.
_header = _DelayedHeader()
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._header_str = None
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if 'DATASUM' in self._header and 'CHECKSUM' not in self._header:
self._output_checksum = 'datasum'
elif 'CHECKSUM' in self._header:
self._output_checksum = True
def __init_subclass__(cls, **kwargs):
# Add the same data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes.
data_prop = cls.__dict__.get('data', None)
if (isinstance(data_prop, (lazyproperty, property))
and data_prop.fdel is None):
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__['data']
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if data_refcount == 2:
self._file._maybe_close_mmap()
setattr(cls, 'data', data_prop.deleter(data))
return super().__init_subclass__(**kwargs)
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self._header:
self._header['EXTNAME'] = value
else:
self._header['EXTNAME'] = (value, 'extension name')
@property
def ver(self):
return self._header.get('EXTVER', 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if 'EXTVER' in self._header:
self._header['EXTVER'] = value
else:
self._header['EXTVER'] = (value, 'extension value')
@property
def level(self):
return self._header.get('EXTLEVEL', 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if 'EXTLEVEL' in self._header:
self._header['EXTLEVEL'] = value
else:
self._header['EXTLEVEL'] = (value, 'extension level')
@property
def is_image(self):
return (
self.name == 'PRIMARY' or
('XTENSION' in self._header and
(self._header['XTENSION'] == 'IMAGE' or
(self._header['XTENSION'] == 'BINTABLE' and
'ZIMAGE' in self._header and self._header['ZIMAGE'] is True))))
@property
def _data_loaded(self):
return ('data' in self.__dict__ and self.data is not DELAYED)
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
Ignore a missing end card in the header data. Note that without the
            end card the end of the header may be ambiguous and result in a
corrupt HDU. In this case the assumption is that the first 2880
block that does not begin with valid FITS header data is the
beginning of the data.
**kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
"""
return cls._readfrom_internal(data, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
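    # Illustrative usage sketch (the file name is hypothetical; only the
    # first HDU in the byte string is parsed):
    #
    #     from astropy.io import fits
    #     with open('example.fits', 'rb') as f:
    #         buf = f.read()
    #     hdu = fits.PrimaryHDU.fromstring(buf)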
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file-like
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(fileobj, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the HDU to a new file. This is a convenience method to
provide a user easier output interface if only one HDU needs
to be written to a file.
Parameters
----------
name : path-like or file-like
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
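    # Illustrative usage sketch (the output file name is hypothetical):
    #
    #     import numpy as np
    #     from astropy.io import fits
    #     hdu = fits.PrimaryHDU(data=np.zeros((10, 10), dtype='float32'))
    #     hdu.writeto('single_hdu.fits', overwrite=True, checksum=True)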
@classmethod
def _from_data(cls, data, header, **kwargs):
"""
Instantiate the HDU object after guessing the HDU class from the
FITS Header.
"""
klass = _hdu_class_from_header(cls, header)
return klass(data=data, header=header, **kwargs)
@classmethod
def _readfrom_internal(cls, data, header=None, checksum=False,
ignore_missing_end=False, **kwargs):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
try:
# First we try to read the header with the fast parser
# from _BasicHeader, which will read only the standard
# 8 character keywords to get the structural keywords
# that are needed to build the HDU object.
header_str, header = _BasicHeader.fromfile(data)
except Exception:
# If the fast header parsing failed, then fallback to
# the classic Header parser, which has better support
# and reporting for the various issues that can be found
# in the wild.
data.seek(header_offset)
header = Header.fromfile(data,
endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {!r} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
.format(data))
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx:idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, '', not ignore_missing_end, True)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
            # __init__ does not accept arbitrary keyword arguments, so delete
            # any keyword arguments it does not recognize; if it did accept
            # **kwargs we could simply pass everything through
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
try:
hdu = cls(data=DELAYED, header=header, **new_kwargs)
except TypeError:
# This may happen because some HDU class (e.g. GroupsHDU) wants
# to set a keyword on the header, which is not possible with the
# _BasicHeader. While HDU classes should not need to modify the
# header in general, sometimes this is needed to fix it. So in
# this case we build a full Header and try again to create the
# HDU object.
if isinstance(header, _BasicHeader):
header = Header.fromstring(header_str)
hdu = cls(data=DELAYED, header=header, **new_kwargs)
else:
raise
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
if isinstance(hdu._header, _BasicHeader):
# Delete the temporary _BasicHeader.
# We need to do this before an eventual checksum computation,
# since it needs to modify temporarily the header
#
# The header string is stored in the HDU._header_str attribute,
# so that it can be used directly when we need to create the
# classic Header object, without having to parse again the file.
del hdu._header
hdu._header_str = header_str
# Checksums are not checked on invalid HDU types
if checksum and checksum != 'remove' and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, int):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer,
offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_pseudo_int_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_pseudo_int_scale_keywords(self):
"""
If the data is signed int 8, unsigned int 16, 32, or 64,
add BSCALE/BZERO cards to header.
"""
if (self._has_data and self._standard and
_is_pseudo_integer(self.data.dtype)):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if 'TFIELDS' in self._header:
self._header.set('BSCALE', 1, after='TFIELDS')
elif 'GCOUNT' in self._header:
self._header.set('BSCALE', 1, after='GCOUNT')
else:
self._header.set('BSCALE', 1)
self._header.set('BZERO', _pseudo_zero(self.data.dtype),
after='BSCALE')
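    # For uint16 data this writes BSCALE = 1 and BZERO = 32768, so the values
    # can be stored as signed 16-bit integers on disk.  A sketch of the BZERO
    # value the convention implies (assuming _pseudo_zero returns the midpoint
    # of the unsigned range):
    #
    #     import numpy as np
    #     dt = np.dtype('uint16')
    #     bzero = 1 << (8 * dt.itemsize - 1)      # 32768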
def _update_checksum(self, checksum, checksum_keyword='CHECKSUM',
datasum_keyword='DATASUM'):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
and ``datasum_keyword`` arguments--see for example ``CompImageHDU``
for an example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == 'remove':
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (modified or self._new or
(checksum and ('CHECKSUM' not in self._header or
'DATASUM' not in self._header or
not self._checksum_valid or
not self._datasum_valid))):
if checksum == 'datasum':
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(checksum_keyword=checksum_keyword,
datasum_keyword=datasum_keyword)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if (self._has_data and self._standard and
_is_pseudo_integer(self.data.dtype)):
for keyword in ('BSCALE', 'BZERO'):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, OSError):
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
# to avoid a bug in the lustre filesystem client, don't
# write zero-byte objects
if size > 0 and _pad_length(size) > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode('ascii'))
size += len(padding)
else:
            # The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, 'ubyte', self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
# HDUList, eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
            # This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
                # Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
        # no other references to it; if there are other references then
        # it is up to the user to clean those up)
if (closed and self._data_loaded and
_get_array_mmap(self.data) is not None):
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
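# Hypothetical sketch of how a third-party HDU subclass could hook into the
# match_header() dispatch in _hdu_class_from_header (the class name and the
# 'MYKEY' keyword are made up):
#
#     from astropy.io import fits
#
#     class MyTableHDU(fits.BinTableHDU):
#         @classmethod
#         def match_header(cls, header):
#             card = header.cards[0]
#             return (card.keyword == 'XTENSION' and
#                     str(card.value).rstrip() == 'BINTABLE' and
#                     'MYKEY' in header)
#
#     fits.register_hdu(MyTableHDU)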
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
    data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, 'CorruptedHDU')
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == 'SIMPLE':
if 'GROUPS' not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
        Differs from the base class ``_writedata()`` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, 'NonstandardHDU', len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
if (header is not None and
not isinstance(header, (Header, _BasicHeader))):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError('header must be a Header object')
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ('SIMPLE', 'XTENSION')
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
"""
if hasattr(self, '_file') and self._file:
return {'file': self._file, 'filemode': self._file.mode,
'hdrLoc': self._header_offset, 'datLoc': self._data_offset,
'datSpan': self._data_size}
else:
return None
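    # Illustrative usage sketch (the file name is hypothetical):
    #
    #     from astropy.io import fits
    #     with fits.open('example.fits') as hdul:
    #         info = hdul[0].fileinfo()
    #         if info is not None:
    #             print(info['hdrLoc'], info['datLoc'], info['datSpan'])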
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case of required cards in wrong order.
if isinstance(self, ExtensionHDU):
firstkey = 'XTENSION'
firstval = self._extension
else:
firstkey = 'SIMPLE'
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards('BITPIX', 1, lambda v: (_is_int(v) and is_valid(v)), 8,
option, errs)
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 0 <= v <= 999), 0,
option, errs)
naxis = self._header.get('NAXIS', 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = 'NAXIS' + str(ax - 2)
self.req_cards(key, ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option, errs)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith('NAXIS') and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = ("NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis))
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(option=option, err_text=err_text,
fix=fix, fix_text="Deleted."))
# Verify that the EXTNAME keyword exists and is a string
if 'EXTNAME' in self._header:
if not isinstance(self._header['EXTNAME'], str):
err_text = 'The EXTNAME keyword must have a string value.'
fix_text = 'Converted the EXTNAME keyword to a string value.'
def fix(header=self._header):
header['EXTNAME'] = str(header['EXTNAME'])
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
            A valid value for a FITS keyword to use, if the given ``test``
            fails, to replace an invalid value.  In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
when created. Also check the card's value by using the ``test``
argument.
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = f"'{keyword}' card does not exist."
fix_text = f"Fixed by inserting a new '{keyword}' card."
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = f"'{keyword}' card at the wrong place (card {index})."
fix_text = f"Fixed by moving it to the right place (card {insert_pos})."
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = f"'{keyword}' card has invalid value '{val}'."
fix_text = f"Fixed by setting a new value '{fix_value}'."
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
return errs
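    # Condensed sketch of how this method is typically called from a
    # _verify() implementation (mirroring the calls in _ValidHDU._verify
    # above):
    #
    #     errs = _ErrList([], unit='Card')
    #     self.req_cards('BITPIX', 1,
    #                    lambda v: _is_int(v) and v in (8, 16, 32, 64, -32, -64),
    #                    8, option, errs)
    #     self.req_cards('NAXIS', 2, lambda v: _is_int(v) and 0 <= v <= 999,
    #                    0, option, errs)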
def add_datasum(self, when=None, datasum_keyword='DATASUM'):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
For testing purposes, provide a ``when`` argument to enable the comment
value in the card to remain consistent. This will enable the
generation of a ``CHECKSUM`` card with a consistent value.
"""
cs = self._calculate_datasum()
if when is None:
when = f'data unit checksum updated {self._get_timestamp()}'
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(self, when=None, override_datasum=False,
checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = f'HDU checksum updated {self._get_timestamp()}'
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, '0' * 16, when,
before=datasum_keyword)
else:
self._header.set(checksum_keyword, '0' * 16, when)
csum = self._calculate_checksum(data_cs,
checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
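    # Illustrative sketch of the reproducible-checksum recipe from the Notes
    # above (the data and comment strings are made up):
    #
    #     import numpy as np
    #     from astropy.io import fits
    #     hdu = fits.ImageHDU(data=np.arange(100, dtype='int32'))
    #     hdu.add_datasum(when='data unit checksum')
    #     hdu.add_checksum(when='HDU checksum', override_datasum=True)
    #     print(hdu.header['DATASUM'], hdu.header['CHECKSUM'])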
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header['DATASUM']):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
"""
if 'CHECKSUM' in self._header:
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header['CHECKSUM']:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
Simply displays warnings if either the checksum or datasum don't match.
"""
if 'CHECKSUM' in self._header:
self._checksum = self._header['CHECKSUM']
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
'Checksum verification failed for HDU {}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
if 'DATASUM' in self._header:
self._datasum = self._header['DATASUM']
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
'Datasum verification failed for HDU {}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(self._data_size, 'ubyte',
self._data_offset)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view('ubyte'))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword='CHECKSUM'):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = '0' * 16
# Convert the header to bytes.
s = self._header.tostring().encode('utf8')
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype='ubyte'), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
            length = min(blocklen, len(data) - i)  # last block may be short
sum32 = self._compute_hdu_checksum(data[i:i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
        This code should only be called with blocks of 2880 bytes.
        Longer blocks result in non-standard checksums with carry overflow.
Historically, this code *was* called with larger blocks and for that
reason still needs to be for backward compatibility.
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view('>u2')
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
hicarry = hi >> u16
locarry = lo >> u16
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
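    # Standalone sketch of the 16-bit carry folding used above (one block of
    # made-up bytes; the odd-length handling is omitted):
    #
    #     import numpy as np
    #     words = np.frombuffer(b'SIMPLE  ' * 360, dtype='>u2')  # 2880 bytes
    #     hi = int(np.add.reduce(words[0::2], dtype=np.uint64))
    #     lo = int(np.add.reduce(words[1::2], dtype=np.uint64))
    #     while (hi >> 16) or (lo >> 16):
    #         hi, lo = (hi & 0xFFFF) + (lo >> 16), (lo & 0xFFFF) + (hi >> 16)
    #     sum32 = (hi << 16) + lo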
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000,
0x00FF0000,
0x0000FF00,
0x000000FF]
_EXCLUDE = [0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60]
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord('0')
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient],
dtype='int32')
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype='byte')
ascii = np.zeros((16,), dtype='byte')
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tobytes())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ''
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
Works similarly to the normal writeto(), but prepends a default
        `PrimaryHDU`, as required by extension HDUs (which cannot stand on
their own).
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get('NAXIS', 0)
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v >= 0),
0, option, errs)
self.req_cards('GCOUNT', naxis + 4, lambda v: (_is_int(v) and v == 1),
1, option, errs)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ('IMAGE', 'TABLE', 'BINTABLE', 'A3DTABLE')
# The check that xtension is not one of the standard types should be
# redundant.
return (card.keyword == 'XTENSION' and
xtension not in standard_xtensions)
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, 'NonstandardExtHDU', len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
|
2b92276f39a3eb67eb6513605551fa64ca36e29f33797f665940a6627e08e255 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import os
import re
import shutil
import sys
import warnings
import numpy as np
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
from astropy.io.fits.file import _File, FILE_MODES
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import (_free_space_check, _get_array_mmap, _is_int,
_tmp_name, fileobj_closed, fileobj_mode,
ignore_sigint, isfile)
from astropy.io.fits.verify import _Verify, _ErrList, VerifyError, VerifyWarning
from astropy.utils import indent
from astropy.utils.exceptions import AstropyUserWarning
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
if HAS_BZ2:
import bz2
__all__ = ["HDUList", "fitsopen"]
# FITS file signature as per RFC 4047
FITS_SIGNATURE = b'SIMPLE  =                    T'
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
cache=True, lazy_load_hdus=None, ignore_missing_simple=False,
**kwargs):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
        append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
        If `True`, do not read all the HDUs and headers in a FITS file
        immediately upon opening.  This is an optimization especially useful
        for large files, as FITS has no way of determining the number and
        offsets of all
the HDUs in a file without scanning through the file and reading all
the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
        ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file.
"""
from astropy.io.fits import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if 'uint' not in kwargs:
kwargs['uint'] = conf.enable_uint
if not name:
raise ValueError(f'Empty filename: {name!r}')
return HDUList.fromfile(name, mode, memmap, save_backup, cache,
lazy_load_hdus, ignore_missing_simple, **kwargs)
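# Illustrative usage sketch (the file name is hypothetical):
#
#     from astropy.io import fits
#     with fits.open('example.fits', memmap=True, lazy_load_hdus=True,
#                    checksum=True) as hdul:
#         hdul.info()
#         data = hdul[1].data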
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : BaseHDU or sequence thereof, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file-like, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
        # Whether we have read all the HDUs from the file or not.
        # This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == 'ostream'
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(f"Element {idx} in the HDUList input is not an HDU.")
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
# When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
else:
return HDUList(hdus)
# Originally this used recursion, but hypothetically an HDUList with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(super().__getitem__,
self._positive_index_of(key))
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def __contains__(self, item):
"""
Returns `True` if ``item`` is an ``HDU`` _in_ ``self`` or a valid
extension specification (e.g., integer extension number, extension
name, or a tuple of extension name and an extension version)
of a ``HDU`` in ``self``.
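For example (illustrative only; ``hdul`` is assumed to be an open
`HDUList` containing a 'SCI' extension)::
    0 in hdul           # by extension number
    'SCI' in hdul       # by extension name
    ('SCI', 2) in hdul  # by (name, EXTVER) pair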
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except (KeyError, ValueError):
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError('An element in the HDUList must be an HDU.')
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError(f'{item} is not an HDU.')
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError(f'{hdu} is not an HDU.')
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError(f'Extension {key} is out of bound or not found.')
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
if (key == end_index or key == -1 and not self._resize):
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
output_verify = self._open_kwargs.get('output_verify', 'exception')
self.close(output_verify=output_verify)
@classmethod
def fromfile(cls, fileobj, mode=None, memmap=None,
save_backup=False, cache=True, lazy_load_hdus=True,
ignore_missing_simple=False, **kwargs):
"""
Creates an `HDUList` instance from a file-like object.
The actual implementation of ``fitsopen()``, and generally shouldn't
be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
save_backup=save_backup, cache=cache,
ignore_missing_simple=ignore_missing_simple,
lazy_load_hdus=lazy_load_hdus, **kwargs)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer-like, etc.
A string or other memory buffer containing an entire FITS file.
Buffer-like objects include :class:`~bytes`, :class:`~bytearray`,
:class:`~memoryview`, and :class:`~numpy.ndarray`.
It should be noted that if that memory is read-only (such as a
Python string) the returned :class:`HDUList`'s data portions will
also be read-only.
**kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
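Examples
--------
A minimal sketch of round-tripping an in-memory FITS file; the variable
names are illustrative only::
    import io
    import numpy as np
    from astropy.io import fits
    # Serialize an HDUList to an in-memory buffer, then read it back.
    buf = io.BytesIO()
    fits.HDUList([fits.PrimaryHDU(np.arange(10))]).writeto(buf)
    hdul = fits.HDUList.fromstring(buf.getvalue())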
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
''.format(data))
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
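For instance (illustrative sketch; assumes ``hdul`` was opened from a
file on disk)::
    info = hdul.fileinfo(0)
    if info is not None:
        print(info['filename'], info['hdrLoc'], info['datLoc'], info['datSpan'])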
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info['file']
fm = info['filemode']
break
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
""" Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
HDU to be removed and returned. If ``index`` is a tuple, it is
of the form ``(name, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : BaseHDU
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
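Examples
--------
Illustrative sketches only; ``hdul`` is assumed to be an existing
`HDUList` containing the named extensions::
    last = hdul.pop()            # remove and return the last HDU
    sci = hdul.pop('SCI')        # remove the first HDU named 'SCI'
    sci2 = hdul.pop(('SCI', 2))  # remove the 'SCI' HDU with EXTVER == 2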
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super().pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : BaseHDU
The HDU object to insert
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError(f'{hdu} is not an HDU.')
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it.")
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError('A GroupsHDU must be inserted as a '
'Primary HDU.')
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : BaseHDU
HDU to add to the `HDUList`.
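Examples
--------
A minimal sketch of building an `HDUList` from scratch; the array
contents are arbitrary::
    import numpy as np
    from astropy.io import fits
    hdul = fits.HDUList()
    hdul.append(fits.PrimaryHDU())
    hdul.append(fits.ImageHDU(data=np.zeros((10, 10)), name='SCI'))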
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('HDUList can only append an HDU.')
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError(
"Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str, tuple of (string, int) or BaseHDU
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
When ``key`` is an HDU object, this function returns the
index of that HDU object in the ``HDUList``.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
Raises
------
ValueError
If ``key`` is an HDU object and it is not found in the ``HDUList``.
KeyError
If an HDU specified by the ``key`` that is an extension number,
extension name, or a tuple of extension name and version is not
found in the ``HDUList``.
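Examples
--------
Illustrative only; ``hdul`` is assumed to contain a 'SCI' extension::
    idx = hdul.index_of('SCI')        # first extension named 'SCI'
    idx = hdul.index_of(('SCI', 2))   # 'SCI' extension with EXTVER == 2
    idx = hdul.index_of(0)            # integer keys are returned unchanged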
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
elif isinstance(key, _BaseHDU):
return self.index(key)
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
'{} indices must be integers, extension names as strings, '
'or (extname, version) tuples; got {}'
''.format(self.__class__.__name__, _key))
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and
(_ver is None or _ver == hdu.ver)):
found = idx
break
if (found is None):
raise KeyError(f'Extension {key!r} not found.')
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(
f'Extension {index} is out of bound or not found.')
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify='fix', verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print verbose messages
"""
if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '{}' mode is not supported."
.format(self._file.mode), AstropyUserWarning)
return
save_backup = self._open_kwargs.get('save_backup', False)
if save_backup and self._file.mode in ('append', 'update'):
filename = self._file.name
if os.path.exists(filename):
# If the file doesn't actually exist anymore for some reason,
# then there's no point in trying to make a backup
backup = filename + '.bak'
idx = 1
while os.path.exists(backup):
backup = filename + '.bak.' + str(idx)
idx += 1
warnings.warn('Saving a backup of {} to {}.'.format(
filename, backup), AstropyUserWarning)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError('Failed to save backup to destination {}: '
'{}'.format(filename, exc))
self.verify(option=output_verify)
if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
extver = str(hdu._header['extver'])
except KeyError:
extver = ''
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print('append HDU', hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if 'EXTEND' in hdr:
if not hdr['EXTEND'] and get_first_ext() is not None:
hdr['EXTEND'] = True
elif get_first_ext() is not None:
if hdr['NAXIS'] == 0:
hdr.set('EXTEND', True, after='NAXIS')
else:
n = hdr['NAXIS']
hdr.set('EXTEND', True, after='NAXIS' + str(n))
def writeto(self, fileobj, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
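Examples
--------
A minimal sketch; ``'output.fits'`` is an assumed output path::
    hdul.writeto('output.fits', overwrite=True, checksum=True)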
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else 'ostream'
# This can accept an open file object that's open to write only, or in
# append/update modes but only if the file doesn't exist.
fileobj = _File(fileobj, mode=mode, overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (self._file and self._file.mode in ('append', 'update')
and not self._file.closed):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
Note that by default this function prints its results to the console
and does not return a value; if ``output=False`` is passed, it instead
returns a list of tuples summarizing the HDUs.
Parameters
----------
output : file-like or bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
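Examples
--------
Illustrative only; ``hdul`` is an open `HDUList`::
    hdul.info()                     # print the summary to sys.stdout
    rows = hdul.info(output=False)  # get the summary as a list of tuples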
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = [f'Filename: {name}',
'No. Name Ver Type Cards Dimensions Format']
format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}'
default = ('', '', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : str
A string containing the file name associated with the HDUList
object if an association exists. Otherwise returns None.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None, memmap=None,
cache=True, lazy_load_hdus=True, ignore_missing_simple=False,
**kwargs):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
if (not ignore_missing_simple and
hdulist._file and
hdulist._file.mode != 'ostream' and
hdulist._file.size > 0):
pos = hdulist._file.tell()
# FITS signature is supposed to be in the first 30 bytes, but to
# allow reading various invalid files we will check in the first
# card (80 bytes).
simple = hdulist._file.read(80)
match_sig = (simple[:29] == FITS_SIGNATURE[:-1] and
simple[29:30] in (b'T', b'F'))
if not match_sig:
# Check the SIMPLE card is there but not written correctly
match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple)
if match_sig_relaxed:
warnings.warn("Found a SIMPLE card but its format doesn't"
" respect the FITS Standard", VerifyWarning)
else:
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError(
'No SIMPLE card found, this file does not appear to '
'be a valid FITS file. If this is really a FITS file, '
'try with ignore_missing_simple=True')
hdulist._file.seek(pos)
# Store additional keyword args that were passed to fits.open
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ('readonly', 'denywrite'):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError('Empty or corrupt FITS file')
if not lazy_load_hdus or kwargs.get('checksum') is True:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size:]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #{} (note: Astropy '
'uses zero-based indexing).\n{}\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.'.format(
len(self), indent(str(exc))), VerifyWarning)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option='warn'):
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = f"HDUList's element {str(idx)} is not an extension HDU."
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
# Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDUs
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module.")
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
with self.fromfile(new_file, mode='append') as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
# Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode='rb+')
else:
old_file = old_name
ffo = _File(old_file, mode='update', memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith('win'):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
# https://github.com/numpy/numpy/issues/8628
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=DeprecationWarning)
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file like
# object. We can't write out to a file, we must update the file
# like object in place. To do this, we write out to a temporary
# file, then delete the contents in our file like object, then
# write the contents of the temporary file to the now empty file
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect of setting the object's ``_resize`` attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print('One or more header is resized.')
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print('One or more data area is resized.')
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
798ea2e07ba2e31691e37b55871debfde99586b100814992c037f4cd35544158 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import os
from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
class StreamingHDU:
"""
A class that provides the capability to stream data to a FITS file
instead of requiring data to all be written at once.
The following pseudocode illustrates its use::
header = astropy.io.fits.Header()
for all the cards you need in the header:
header[key] = (value, comment)
shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
for each piece of data:
shdu.write(data)
shdu.close()
"""
def __init__(self, name, header):
"""
Construct a `StreamingHDU` object given a file name and a header.
Parameters
----------
name : path-like or file-like
The file to which the header and data will be streamed. If opened,
the file object must be opened in a writeable binary mode such as
'wb' or 'ab+'.
header : `Header` instance
The header object associated with the data to be written
to the file.
Notes
-----
The file will be opened and the header appended to the end of
the file. If the file does not already exist, it will be
created, and if the header represents a Primary header, it
will be written to the beginning of the file. If the file
does not exist and the provided header is not a Primary
header, a default Primary HDU will be inserted at the
beginning of the file and the provided header will be added as
the first extension. If the file does already exist, but the
provided header represents a Primary header, the header will
be modified to an image extension header and appended to the
end of the file.
"""
if isinstance(name, gzip.GzipFile):
raise TypeError('StreamingHDU not supported for GzipFile objects.')
self._header = header.copy()
# handle a file object instead of a file name
filename = fileobj_name(name) or ''
# Check if the file already exists. If it does not, check to see
# if we were provided with a Primary Header. If not we will need
# to prepend a default PrimaryHDU to the file before writing the
# given header.
newfile = False
if filename:
if not os.path.exists(filename) or os.path.getsize(filename) == 0:
newfile = True
elif (hasattr(name, 'len') and name.len == 0):
newfile = True
if newfile:
if 'SIMPLE' not in self._header:
hdulist = HDUList([PrimaryHDU()])
hdulist.writeto(name, 'exception')
else:
# This will not be the first extension in the file so we
# must change the Primary header provided into an image
# extension header.
if 'SIMPLE' in self._header:
self._header.set('XTENSION', 'IMAGE', 'Image extension',
after='SIMPLE')
del self._header['SIMPLE']
if 'PCOUNT' not in self._header:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
else:
dim = str(dim)
self._header.set('PCOUNT', 0, 'number of parameters',
after='NAXIS' + dim)
if 'GCOUNT' not in self._header:
self._header.set('GCOUNT', 1, 'number of groups',
after='PCOUNT')
self._ffo = _File(name, 'append')
# TODO : Fix this once the HDU writing API is cleaned up
tmp_hdu = _BaseHDU()
# Passing self._header as an argument to _BaseHDU() will cause its
# values to be modified in undesired ways...need to have a better way
# of doing this
tmp_hdu._header = self._header
self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
self._data_offset = self._ffo.tell()
self._size = self.size
if self._size != 0:
self.writecomplete = False
else:
self.writecomplete = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def write(self, data):
"""
Write the given data to the stream.
Parameters
----------
data : ndarray
Data to stream to the file.
Returns
-------
writecomplete : int
Flag that when `True` indicates that all of the required
data has been written to the stream.
Notes
-----
Only the amount of data specified in the header provided to the class
constructor may be written to the stream. If the provided data would
cause the stream to overflow, an `OSError` exception is
raised and the data is not written. Once sufficient data has been
written to the stream to satisfy the amount specified in the header,
the stream is padded to fill a complete FITS block and no more data
will be accepted. An attempt to write more data after the stream has
been filled will raise an `OSError` exception. If the
dtype of the input data does not match what is expected by the header,
a `TypeError` exception is raised.
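Examples
--------
A sketch of streaming data in chunks; ``header`` and ``chunks`` are
assumed placeholders, and each chunk must be an ndarray whose dtype
matches the header::
    shdu = StreamingHDU('stream.fits', header)
    for chunk in chunks:
        if shdu.write(chunk):
            break  # the stream is full; further writes would raise OSError
    shdu.close()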
"""
size = self._ffo.tell() - self._data_offset
if self.writecomplete or size + data.nbytes > self._size:
raise OSError('Attempt to write more data to the stream than the '
'header specified.')
if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:
raise TypeError('Supplied data does not match the type specified '
'in the header.')
if data.dtype.str[0] != '>':
# byteswap little endian arrays before writing
output = data.byteswap()
else:
output = data
self._ffo.writearray(output)
if self._ffo.tell() - self._data_offset == self._size:
# the stream is full so pad the data to the next FITS block
self._ffo.write(_pad_length(self._size) * '\0')
self.writecomplete = True
self._ffo.flush()
return self.writecomplete
@property
def size(self):
"""
Return the size (in bytes) of the data portion of the HDU.
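Following the FITS standard, this is computed as::
    size = |BITPIX| / 8 * GCOUNT * (PCOUNT + NAXIS1 * NAXIS2 * ... * NAXISn)
where, for random-groups HDUs (``SIMPLE = T`` and ``GROUPS = T``), the
``NAXIS1`` axis is excluded from the product.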
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
simple = self._header.get('SIMPLE', 'F')
random_groups = self._header.get('GROUPS', 'F')
if simple == 'T' and random_groups == 'T':
groups = 1
else:
groups = 0
size = 1
for idx in range(groups, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def close(self):
"""
Close the physical FITS file.
"""
self._ffo.close()
|
8f1e0a86830e8df8e636c0a5ff20dafa2913d47de09a56a46f82db480f81e324 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.table import Table
from astropy.units import UnitsWarning, Unit, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.io.fits.column import ColumnAttribute, Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent.
Consistency is determined by ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
Compare two record arrays
Does this field by field, using approximation testing for float columns
(Complex not yet handled.)
Column names not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f'field {i} type differs')
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f'fielda[{row}]: {fielda[row]}')
print(f'fieldb[{row}]: {fieldb[row]}')
print(f'field {i} differs in row {row}')
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [k for k, v in fits.Column.__dict__.items()
if isinstance(v, ColumnAttribute)]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that X format must be two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name='t2', format='I2', array=[91, 92, 93])
c2 = fits.Column(name='t4', format='I5', array=[91, 92, 93])
c3 = fits.Column(name='t8', format='I10', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Ensure I can change the value of one data element and it effects
# all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
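# Note (based on the behavior this test relies on): passing nrows=8 while the
# source columns only hold 4 rows yields a table whose first 4 rows are the
# copied data and whose remaining rows start out zero/blank until filled below.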
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
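# The '+' operator on a ColDefs returns a new ColDefs with the extra column
# appended; the original `coldefs` object is left unchanged (the in-place
# variant, add_col, is exercised in the next test).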
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
tbhdu.columns.add_col(c5)
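# add_col() appends the column to this ColDefs in place, so the same tbhdu
# now exposes the new 'flag' field, as the assertions below confirm.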
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
col = fits.Column(name='a', array=np.array([1, 2]), format='K')
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ['target', 'V_mag', 'a']
array = np.rec.array(
[('NGC1001', 11.1, 1),
('NGC1002', 12.3, 2),
('NGC1003', 15.2, 0)],
formats='a20,f4,i8')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
# Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
tbhdu.columns.del_col('flag')
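# del_col() removes the named column from this ColDefs in place; the shortened
# name list and the data comparison below confirm the 'flag' field is gone.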
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z),
('NGC2', 334, '', z),
('NGC3', 308, '', z),
('NCG4', 317, '', z)],
formats='a10,u4,a10,5f4')
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col('counts')
tbhdu.columns.del_col('notes')
assert tbhdu.columns.names == ['target', 'spectrum']
array = np.rec.array(
[('NGC1', z),
('NGC2', z),
('NGC3', z),
('NCG4', z)],
formats='a10,5f4')
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
tbhdu.columns.del_col('V_mag')
assert tbhdu.columns.names == ['target']
array = np.rec.array(
[('NGC1001', ),
('NGC1002', ),
('NGC1003', )],
formats='a20')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
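# ColDefs also supports '+' between two ColDefs objects, so this single call
# builds a 10-column table from the two 5-column inputs.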
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
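# A slice of a row (row[1:4]) is a view onto the same underlying record, so
# assignments made through the slice below are visible through the full row
# and vice versa, which is what the following assertions rely on.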
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
# Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
# Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
# Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # noqa
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # noqa
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
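# Slice assignment on a FITS_rec copies whole rows at once; row 8 (the ninth)
# keeps its default zero/blank values, which the final assertions of this test
# check.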
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # noqa
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # noqa
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # noqa
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing name to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name='spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data('table.fits'))
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data('tb.fits'))
for col in ('c1', 'c2', 'c3', 'c4'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data('ascii.fits'))
for col in ('a', 'b'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(name='x', format='PI()',
array=np.array([[45, 56], [11, 12, 13]],
dtype=np.object_))
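# 'PI()' declares a variable-length array (VLA) column of 16-bit integers, so
# each row may hold a different number of elements, as the two rows here do.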
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data['x']) == type(hdu.data.x) # noqa
assert (hdu.data['x'][0] == hdu.data.x[0]).all()
assert (hdu.data['x'][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
# The zero-width ORBPARM column is still listed among the data's field
# names, and the data should be readable despite it
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
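# In the binary-table case each 'A10' cell is padded to 10 characters with NUL
# bytes; the ASCII-table round trip later in this test pads with spaces instead
# (see the s.replace('\x00', ' ') comparison below).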
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tobytes().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits'), mode='update') as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header['TDIM1'] = '(2,3)'
hdul[1].header['TDIM2'] = '(4,2)'
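# TDIMn uses FITS axis order (fastest-varying axis first): '(2,3)' turns each
# 6-element cell of column 1 into a per-row (3, 2) numpy array, and '(4,2)'
# splits each 8-character string of column 2 into two 4-character strings,
# matching the shapes asserted below.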
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1, ), (2, )], dtype=([('x', 'i4', (1, ))]))
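# A length-one trailing axis still round-trips: the column is written with
# TDIM1 = '(1)' and reads back with a per-row shape of (1,), as asserted below.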
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('onedtable.fits'))
with fits.open(self.temp('onedtable.fits')) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header['TDIM1'] == '(1)'
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple of different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
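# With TDIM1 now reading '(2,2,2)' while TFORM1 is still '12A', only the first
# 8 of the 12 bytes in each cell are meaningful; the reader should treat the
# trailing 4 bytes as undefined fill, which the assertions below verify.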
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
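# A (3, 3) array of 3-character strings is stored as one 27-character cell;
# the TDIM1 assertion below shows the string length listed first, followed by
# the array axes: '(3,3,3)'.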
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
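# Concrete reading of the rule above: TFORM1='20I' with TDIM1='(2,2)' is
# accepted (4 <= 20 elements), whereas a '2I' column with dim='(2,2)' must be
# rejected (4 > 2), which is checked at the end of this test.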
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
assert h[1].data.itemsize == 48  # 24 16-bit elements (20 + 4) per row = 48 bytes
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits'])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_colums(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
# Format X (bit array) is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
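# With TFORM1 changed to a float format and TNULL1 removed, the '---'
# entries can no longer be recognized as nulls, so reading the column
# should raise a ValueError (checked below).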
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = ' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace('2 ', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace('3.00000000', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unncessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match='Field 2 has a repeat count of 0'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
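# Expected row width: 'a' (i8 = 8 bytes) + 'b' (S64 = 64 bytes)
# + 'c' (3x2 int32 = 24 bytes) = 96 bytes, i.e. the NAXIS1 value below.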
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
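# TDIMn describes the shape of each cell (in FITS/Fortran axis order); the
# array here comes straight from a big-endian file, so no byteswap should be
# needed when it is written back out.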
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
msg = (r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\.")
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
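# FITS 'pseudo-unsigned' convention: unsigned 32-bit data is stored in a
# signed 'J' column with TZERO = 2**31 applied as an offset on read/write.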
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
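# Scaled column: physical value = BSCALE * stored + BZERO, so int16 storage
# with BZERO = 32768 behaves like an unsigned 16-bit column.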
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -2**22, 10, 2**23], dtype='i4')
i10 = np.array([2**8, 2**31-1, -2**29, 30, 2**31-1], dtype='i8')
i20 = np.array([2**16, 2**63-1, -2**63, 40, 2**63-1], dtype='i8')
i02 = np.array([2**8, 2**13, -2**9, 50, 2**13], dtype='i2')
t0 = Table([i08, i08*2, i10, i20, i02])
t1 = Table.read(self.data('ascii_i4-i20.fits'))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert len(objgraph.by_type(type_)) <= refcount, \
f"More {type_!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
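# 'P' column descriptors use 32-bit heap pointers while 'Q' descriptors use
# 64-bit pointers; both variants should round-trip identically here.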
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
@pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == 'win32',
reason='https://github.com/numpy/numpy/issues/20699')
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
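# 'theap-gap.fits' is expected to contain a gap between the table records and
# the variable-length-array heap (as given by the THEAP keyword), which must
# be honored when reading the VLA data back.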
hdul = fits.open(self.data('theap-gap.fits'))
data = hdul[1].data
assert data.shape == (500,)
assert data['i'][497] == 497
assert np.array_equal(data['arr'][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name='var', format='PI()',
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data['var'].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data('variable_length_table.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data['var'].tolist() == [[45, 56], [11, 12, 13]]
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a loose notion here,
since Numpy is case-insensitive when parsing format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key
in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column('TEST', np.dtype(recformat))
c.format == fitsformat
c = fits.Column('TEST', recformat)
c.format == fitsformat
c = fits.Column('TEST', fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
assert c.format.recformat == 'i4'
# With specified widths, integer precision should be set appropriately
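# (roughly: widths up to 4 digits fit in int16, up to 9 digits in int32,
# and wider fields need int64)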
c = fits.Column('TEST', 'I4', ascii=True)
assert c.format == 'I4'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I9', ascii=True)
assert c.format == 'I9'
assert c.format.recformat == 'i4'
c = fits.Column('TEST', 'I12', ascii=True)
assert c.format == 'I12'
assert c.format.recformat == 'i8'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tobytes() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_seqence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
r'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_inc='1', time_ref_pos=1,
coord_ref_point='1', coord_ref_value='1')
err_msgs = ['keyword arguments to Column were invalid',
'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
@pytest.mark.parametrize('keys',
[{'TFORM': 'Z', 'TDISP': 'E'},
{'TFORM': '2', 'TDISP': '2E'},
{'TFORM': 3, 'TDISP': 6.3},
{'TFORM': float, 'TDISP': np.float64},
{'TFORM': '', 'TDISP': 'E.5'}])
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP'])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with pytest.warns(UnitsWarning, match="'not-a-unit' did not parse as"
" fits unit") as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
def test_a3dtable(tmpdir):
testfile = str(tmpdir.join('test.fits'))
hdu = fits.BinTableHDU.from_columns([
fits.Column(name='FOO', format='J', array=np.arange(10))
])
hdu.header['XTENSION'] = 'A3DTABLE'
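# 'A3DTABLE' is a historical, pre-standard extension name for binary tables;
# verification with 'fix' is expected to convert it to 'BINTABLE'.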
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].header['XTENSION'] == 'A3DTABLE'
with pytest.warns(AstropyUserWarning) as w:
hdul.verify('fix')
assert str(w[0].message) == 'Verification reported errors:'
assert str(w[2].message).endswith(
'Converted the XTENSION keyword to BINTABLE.')
assert hdul[1].header['XTENSION'] == 'BINTABLE'
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header['FOO'] = None
hdu.header.cards['FOO']._value = np.nan
testfile = tmp_path / 'test.fits'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / 'invalid_unit.fits'
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = '1 / (MeV sr s)'
unit = Unit(invalid_unit)
t = Table({'a': [1, 2, 3]})
t.write(path)
with fits.open(path, mode='update') as hdul:
hdul[1].header['TUNIT1'] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t['a'].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict='silent')
assert isinstance(t['a'].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict='raise')
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict='warn')
|
75cb67228946f836bd76c48fb1033affb19eb26d1b8317cb31a02ba0173c0a9c | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import warnings
import pytest
import numpy as np
from .test_table import comparerecords
from astropy.io.fits.hdu.base import _ValidHDU
from astropy.io import fits
from . import FitsTestCase
class TestChecksumFunctions(FitsTestCase):
# All checksums have been verified against CFITSIO
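# (DATASUM holds the 32-bit ones'-complement checksum of the data area as a
# decimal string; CHECKSUM is an ASCII encoding chosen so that the checksum
# of the whole HDU, header plus data, comes out to zero.)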
def setup(self):
super().setup()
self._oldfilters = warnings.filters[:]
warnings.filterwarnings(
'error',
message='Checksum verification failed')
warnings.filterwarnings(
'error',
message='Datasum verification failed')
# Monkey-patch the _get_timestamp method so that the checksum
# timestamps (and hence the checksum themselves) are always the same
self._old_get_timestamp = _ValidHDU._get_timestamp
_ValidHDU._get_timestamp = lambda self: '2013-12-20T13:36:10'
def teardown(self):
super().teardown()
warnings.filters = self._oldfilters
_ValidHDU._get_timestamp = self._old_get_timestamp
def test_sample_file(self):
hdul = fits.open(self.data('checksum.fits'), checksum=True)
assert hdul._read_all
hdul.close()
def test_image_create(self):
n = np.arange(100, dtype=np.int64)
hdu = fits.PrimaryHDU(n)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert (hdu.data == hdul[0].data).all()
assert 'CHECKSUM' in hdul[0].header
assert 'DATASUM' in hdul[0].header
if not sys.platform.startswith('win32'):
# The checksum ends up being different on Windows, possibly due
# to slight floating point differences
assert hdul[0].header['CHECKSUM'] == 'ZHMkeGKjZGKjbGKj'
assert hdul[0].header['DATASUM'] == '4950'
def test_scaled_data(self):
with fits.open(self.data('scale.fits')) as hdul:
orig_data = hdul[0].data.copy()
hdul[0].scale('int16', 'old')
hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul1:
assert (hdul1[0].data == orig_data).all()
assert 'CHECKSUM' in hdul1[0].header
assert hdul1[0].header['CHECKSUM'] == 'cUmaeUjZcUjacUjW'
assert 'DATASUM' in hdul1[0].header
assert hdul1[0].header['DATASUM'] == '1891563534'
def test_scaled_data_auto_rescale(self):
"""
Regression test for
https://github.com/astropy/astropy/issues/3883#issuecomment-115122647
Ensure that when scaled data is automatically rescaled on
opening/writing a file that the checksum and datasum are computed for
the rescaled array.
"""
with fits.open(self.data('scale.fits')) as hdul:
# Write out a copy of the data with the rescaling applied
hdul.writeto(self.temp('rescaled.fits'))
# Reopen the new file and save it back again with a checksum
with fits.open(self.temp('rescaled.fits')) as hdul:
hdul.writeto(self.temp('rescaled2.fits'), overwrite=True,
checksum=True)
# Now do like in the first writeto but use checksum immediately
with fits.open(self.data('scale.fits')) as hdul:
hdul.writeto(self.temp('rescaled3.fits'), checksum=True)
# Also don't rescale the data but add a checksum
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
hdul.writeto(self.temp('scaled.fits'), checksum=True)
# Must use nested with statements to support older Python versions
# (contextlib.nested is not available in newer Pythons).
with fits.open(self.temp('rescaled2.fits')) as hdul1:
with fits.open(self.temp('rescaled3.fits')) as hdul2:
with fits.open(self.temp('scaled.fits')) as hdul3:
hdr1 = hdul1[0].header
hdr2 = hdul2[0].header
hdr3 = hdul3[0].header
assert hdr1['DATASUM'] == hdr2['DATASUM']
assert hdr1['CHECKSUM'] == hdr2['CHECKSUM']
assert hdr1['DATASUM'] != hdr3['DATASUM']
assert hdr1['CHECKSUM'] != hdr3['CHECKSUM']
def test_uint16_data(self):
checksums = [
('aDcXaCcXaCcXaCcX', '0'), ('oYiGqXi9oXiEoXi9', '1746888714'),
('VhqQWZoQVfoQVZoQ', '0'), ('4cPp5aOn4aOn4aOn', '0'),
('8aCN8X9N8aAN8W9N', '1756785133'), ('UhqdUZnbUfnbUZnb', '0'),
('4cQJ5aN94aNG4aN9', '0')]
with fits.open(self.data('o4sp040b0_raw.fits'), uint=True) as hdul:
hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), uint=True,
checksum=True) as hdul1:
for idx, (hdu_a, hdu_b) in enumerate(zip(hdul, hdul1)):
if hdu_a.data is None or hdu_b.data is None:
assert hdu_a.data is hdu_b.data
else:
assert (hdu_a.data == hdu_b.data).all()
assert 'CHECKSUM' in hdul[idx].header
assert hdul[idx].header['CHECKSUM'] == checksums[idx][0]
assert 'DATASUM' in hdul[idx].header
assert hdul[idx].header['DATASUM'] == checksums[idx][1]
def test_groups_hdu_data(self):
imdata = np.arange(100.0)
imdata.shape = (10, 1, 1, 2, 5)
pdata1 = np.arange(10) + 0.1
pdata2 = 42
x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz'],
pardata=[pdata1, pdata2], bitpix=-32)
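# Random-groups data: each group record stores the named parameters followed
# by the image array; bitpix=-32 selects 32-bit IEEE floats.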
hdu = fits.GroupsHDU(x)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(hdul[0].data, hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == '3eDQAZDO4dDOAZDO'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '2797758084'
def test_binary_table_data(self):
a1 = np.array(['NGC1001', 'NGC1002', 'NGC1003'])
a2 = np.array([11.1, 12.3, 15.2])
col1 = fits.Column(name='target', format='20A', array=a1)
col2 = fits.Column(name='V_mag', format='E', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == 'aD1Oa90MaC0Ma90M'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1062205743'
def test_variable_length_table_data(self):
c1 = fits.Column(name='var', format='PJ()',
array=np.array([[45.0, 56], np.array([11, 12, 13])],
'O'))
c2 = fits.Column(name='xyz', format='2I', array=[[11, 3], [12, 4]])
tbhdu = fits.BinTableHDU.from_columns([c1, c2])
tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == 'YIGoaIEmZIEmaIEm'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1507485'
def test_ascii_table_data(self):
a1 = np.array(['abc', 'def'])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name='abc', format='A3', array=a1)
# This column used to be E format, but the single-precision float lost
# too much precision when scaling so it was changed to a D
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
x = fits.ColDefs([c1, c2, c3])
hdu = fits.TableHDU.from_columns(x)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
if not sys.platform.startswith('win32'):
# The checksum ends up being different on Windows, possibly due
# to slight floating point differences
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == '3rKFAoI94oICAoI9'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1914653725'
def test_compressed_image_data(self):
with fits.open(self.data('comp.fits')) as h1:
h1.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as h2:
assert np.all(h1[1].data == h2[1].data)
assert 'CHECKSUM' in h2[0].header
assert h2[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in h2[0].header
assert h2[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in h2[1].header
assert h2[1].header['CHECKSUM'] == 'ZeAbdb8aZbAabb7a'
assert 'DATASUM' in h2[1].header
assert h2[1].header['DATASUM'] == '113055149'
def test_failing_compressed_datasum(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4587
"""
n = np.ones((10, 10), dtype='float32')
comp_hdu = fits.CompImageHDU(n)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert np.all(hdul[1].data == comp_hdu.data)
def test_compressed_image_data_int16(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdu.writeto(self.temp('uncomp.fits'), checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert np.all(hdul[1].data == comp_hdu.data)
assert np.all(hdul[1].data == hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1]._header['CHECKSUM'] == 'J5cCJ5c9J5cAJ5c9'
assert 'DATASUM' in hdul[1].header
assert hdul[1]._header['DATASUM'] == '2453673070'
assert 'CHECKSUM' in hdul[1].header
with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
header_comp = hdul[1]._header
header_uncomp = hdul2[1].header
assert 'ZHECKSUM' in header_comp
assert 'CHECKSUM' in header_uncomp
assert header_uncomp['CHECKSUM'] == 'ZE94eE91ZE91bE91'
assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
assert 'ZDATASUM' in header_comp
assert 'DATASUM' in header_uncomp
assert header_uncomp['DATASUM'] == '160565700'
assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
def test_compressed_image_data_float32(self):
n = np.arange(100, dtype='float32')
hdu = fits.ImageHDU(n)
comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdu.writeto(self.temp('uncomp.fits'), checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert np.all(hdul[1].data == comp_hdu.data)
assert np.all(hdul[1].data == hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
# The checksum ends up being different on Windows and s390/bigendian,
# possibly due to slight floating point differences? See gh-10921.
# TODO fix these so they work on all platforms; otherwise pointless.
# assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
# assert hdul[1]._header['DATASUM'] == '1277667818'
with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
header_comp = hdul[1]._header
header_uncomp = hdul2[1].header
assert 'ZHECKSUM' in header_comp
assert 'CHECKSUM' in header_uncomp
assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
assert 'ZDATASUM' in header_comp
assert 'DATASUM' in header_uncomp
assert header_uncomp['DATASUM'] == '2393636889'
assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
def test_open_with_no_keywords(self):
hdul = fits.open(self.data('arange.fits'), checksum=True)
hdul.close()
def test_append(self):
hdul = fits.open(self.data('tb.fits'))
hdul.writeto(self.temp('tmp.fits'), overwrite=True)
n = np.arange(100)
fits.append(self.temp('tmp.fits'), n, checksum=True)
hdul.close()
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
assert hdul[0]._checksum is None
hdul.close()
def test_writeto_convenience(self):
n = np.arange(100)
fits.writeto(self.temp('tmp.fits'), n, overwrite=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto_existing(self):
"""
Tests that when using writeto with checksum=True, a checksum and
datasum are added to HDUs that did not previously have one.
Regression test for https://github.com/spacetelescope/PyFITS/issues/8
"""
with fits.open(self.data('tb.fits')) as hdul:
hdul.writeto(self.temp('test.fits'), checksum=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'CHECKSUM' in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header['CHECKSUM'] == '7UgqATfo7TfoATfo'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == '99daD8bX98baA8bU'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1829680925'
def test_datasum_only(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum='datasum')
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
if not (hasattr(hdul[0], '_datasum') and hdul[0]._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdul[0], '_checksum') and not hdul[0]._checksum):
pytest.fail(msg='Non-empty CHECKSUM keyword')
def test_open_update_mode_preserve_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 where
checksums are being removed from headers when a file is opened in
update mode, even though no changes were made to the file.
"""
self.copy_file('checksum.fits')
with fits.open(self.temp('checksum.fits')) as hdul:
data = hdul[1].data.copy()
hdul = fits.open(self.temp('checksum.fits'), mode='update')
hdul.close()
with fits.open(self.temp('checksum.fits')) as hdul:
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
assert comparerecords(data, hdul[1].data)
def test_open_update_mode_update_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148, part
2. This ensures that if a file contains a checksum, the checksum is
updated when changes are saved to the file, even if the file was opened
with the default of checksum=False.
An existing checksum and/or datasum are only stripped if the file is
opened with checksum='remove'.
"""
self.copy_file('checksum.fits')
with fits.open(self.temp('checksum.fits')) as hdul:
header = hdul[1].header.copy()
data = hdul[1].data.copy()
with fits.open(self.temp('checksum.fits'), mode='update') as hdul:
hdul[1].header['FOO'] = 'BAR'
hdul[1].data[0]['TIME'] = 42
with fits.open(self.temp('checksum.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-3] == header[:-2]
assert 'CHECKSUM' in header2
assert 'DATASUM' in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
with fits.open(self.temp('checksum.fits'), mode='update',
checksum='remove') as hdul:
pass
with fits.open(self.temp('checksum.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-1] == header[:-2]
assert 'CHECKSUM' not in header2
assert 'DATASUM' not in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
def test_overwrite_invalid(self):
"""
Tests that invalid checksum or datasum are overwritten when the file is
saved.
"""
reffile = self.temp('ref.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul.writeto(reffile, checksum=True)
testfile = self.temp('test.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul[0].header['DATASUM'] = '1 '
hdul[0].header['CHECKSUM'] = '8UgqATfo7TfoATfo'
hdul[1].header['DATASUM'] = '2349680925'
hdul[1].header['CHECKSUM'] = '11daD8bX98baA8bU'
hdul.writeto(testfile)
with fits.open(testfile) as hdul:
hdul.writeto(self.temp('test2.fits'), checksum=True)
with fits.open(self.temp('test2.fits')) as hdul:
with fits.open(reffile) as ref:
assert 'CHECKSUM' in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header['CHECKSUM'] == ref[0].header['CHECKSUM']
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == ref[1].header['CHECKSUM']
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == ref[1].header['DATASUM']
def _check_checksums(self, hdu):
if not (hasattr(hdu, '_datasum') and hdu._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdu, '_checksum') and hdu._checksum):
pytest.fail(msg='Missing CHECKSUM keyword')
|
dcc23d93297614ecd3ecaac3ae6889fd0fac4decdf1dda2af14cef6fb953ab4f | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import numpy as np
from astropy.io import fits
from . import FitsTestCase
class TestDivisionFunctions(FitsTestCase):
"""Test code units that rely on correct integer division."""
def test_rec_from_string(self):
with fits.open(self.data('tb.fits')) as t1:
s = t1[1].data.tobytes()
np.rec.array(
s,
dtype=np.dtype([('c1', '>i4'), ('c2', '|S3'),
('c3', '>f4'), ('c4', '|i1')]),
shape=len(s) // 12)
def test_card_with_continue(self):
h = fits.PrimaryHDU()
h.header['abc'] = 'abcdefg' * 20
def test_valid_hdu_size(self):
with fits.open(self.data('tb.fits')) as t1:
assert type(t1[1].size) is type(1) # noqa
def test_hdu_get_size(self):
with fits.open(self.data('tb.fits')) as _:
pass
def test_section(self, capsys):
# section testing
with fits.open(self.data('arange.fits')) as fs:
assert np.all(fs[0].section[3, 2, 5] == np.array([357]))
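    # Note (illustrative, not part of the original suite): the ``section``
    # attribute reads just the requested slice from the file instead of
    # loading the full ``.data`` array, e.g.
    #
    #     with fits.open('arange.fits') as hdul:
    #         sub = hdul[0].section[1:3, :, 2]
    #
    # which is useful for large images.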
|
cb426b1d745b8ffe005dfc541cd41b61e58b970bfd38f0f6f221b6293970b02e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import time
import pytest
import numpy as np
from . import FitsTestCase
from .test_table import comparerecords
from astropy.io import fits
class TestGroupsFunctions(FitsTestCase):
def test_open(self):
with fits.open(self.data('random_groups.fits')) as hdul:
assert isinstance(hdul[0], fits.GroupsHDU)
naxes = (3, 1, 128, 1, 1)
parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE']
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 147, naxes, 'float32',
'3 Groups 5 Parameters')]
assert hdul.info(output=False) == info
ghdu = hdul[0]
assert ghdu.parnames == parameters
assert list(ghdu.data.dtype.names) == parameters + ['DATA']
assert isinstance(ghdu.data, fits.GroupData)
            # The GCOUNT keyword should equal the number of groups in the data
assert ghdu.header['GCOUNT'] == len(ghdu.data)
assert ghdu.data.data.shape == (len(ghdu.data),) + naxes[::-1]
assert ghdu.data.parnames == parameters
assert isinstance(ghdu.data[0], fits.Group)
assert len(ghdu.data[0]) == len(parameters) + 1
assert ghdu.data[0].data.shape == naxes[::-1]
assert ghdu.data[0].parnames == parameters
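    # A minimal sketch (not part of the original suite) of reading a single
    # group parameter by name; ``par`` returns one value per group.
    def _sketch_read_group_parameter(self):
        with fits.open(self.data('random_groups.fits')) as hdul:
            gdata = hdul[0].data
            baselines = gdata.par('BASELINE')
            assert len(baselines) == len(gdata)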
def test_open_groups_in_update_mode(self):
"""
Test that opening a file containing a groups HDU in update mode and
then immediately closing it does not result in any unnecessary file
modifications.
Similar to
test_image.TestImageFunctions.test_open_scaled_in_update_mode().
"""
# Copy the original file before making any possible changes to it
self.copy_file('random_groups.fits')
mtime = os.stat(self.temp('random_groups.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('random_groups.fits'), mode='update',
memmap=False).close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('random_groups.fits')).st_mtime
def test_random_groups_data_update(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3730 and
for https://github.com/spacetelescope/PyFITS/issues/102
"""
self.copy_file('random_groups.fits')
with fits.open(self.temp('random_groups.fits'), mode='update') as h:
h[0].data['UU'] = 0.42
with fits.open(self.temp('random_groups.fits'), mode='update') as h:
assert np.all(h[0].data['UU'] == 0.42)
def test_parnames_round_trip(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/130
Ensures that opening a random groups file in update mode or writing it
to a new file does not cause any change to the parameter names.
"""
        # Because this test tries to update the random_groups.fits file, make
        # a copy of it first (so that the original file doesn't actually get
        # modified on the off chance that the test fails).
self.copy_file('random_groups.fits')
parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE']
with fits.open(self.temp('random_groups.fits'), mode='update') as h:
assert h[0].parnames == parameters
h.flush()
# Open again just in read-only mode to ensure the parnames didn't
# change
with fits.open(self.temp('random_groups.fits')) as h:
assert h[0].parnames == parameters
h.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[0].parnames == parameters
def test_groupdata_slice(self):
"""
A simple test to ensure that slicing GroupData returns a new, smaller
GroupData object, as is the case with a normal FITS_rec. This is a
        regression test for an as-yet unreported issue where slicing
GroupData returned a single Group record.
"""
with fits.open(self.data('random_groups.fits')) as hdul:
s = hdul[0].data[1:]
assert isinstance(s, fits.GroupData)
assert len(s) == 2
assert hdul[0].data.parnames == s.parnames
def test_group_slice(self):
"""
        Tests basic slicing of a single group record.
"""
# A very basic slice test
with fits.open(self.data('random_groups.fits')) as hdul:
g = hdul[0].data[0]
s = g[2:4]
assert len(s) == 2
assert s[0] == g[2]
assert s[-1] == g[-3]
s = g[::-1]
assert len(s) == 6
assert (s[0] == g[-1]).all()
assert s[-1] == g[0]
s = g[::2]
assert len(s) == 3
assert s[0] == g[0]
assert s[1] == g[2]
assert s[2] == g[4]
def test_create_groupdata(self):
"""
Basic test for creating GroupData from scratch.
"""
imdata = np.arange(100.0)
imdata.shape = (10, 1, 1, 2, 5)
pdata1 = np.arange(10, dtype=np.float32) + 0.1
pdata2 = 42.0
x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz'],
pardata=[pdata1, pdata2], bitpix=-32)
assert x.parnames == ['abc', 'xyz']
assert (x.par('abc') == pdata1).all()
assert (x.par('xyz') == ([pdata2] * len(x))).all()
assert (x.data == imdata).all()
# Test putting the data into a GroupsHDU and round-tripping it
ghdu = fits.GroupsHDU(data=x)
assert ghdu.parnames == ['abc', 'xyz']
assert ghdu.header['GCOUNT'] == 10
ghdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
hdr = h[0].header
assert hdr['GCOUNT'] == 10
assert hdr['PCOUNT'] == 2
assert hdr['NAXIS'] == 5
assert hdr['NAXIS1'] == 0
assert hdr['NAXIS2'] == 5
assert hdr['NAXIS3'] == 2
assert hdr['NAXIS4'] == 1
assert hdr['NAXIS5'] == 1
assert h[0].data.parnames == ['abc', 'xyz']
assert comparerecords(h[0].data, x)
def test_duplicate_parameter(self):
"""
Tests support for multiple parameters of the same name, and ensures
that the data in duplicate parameters are returned as a single summed
value.
"""
imdata = np.arange(100.0)
imdata.shape = (10, 1, 1, 2, 5)
pdata1 = np.arange(10, dtype=np.float32) + 1
pdata2 = 42.0
x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz', 'abc'],
pardata=[pdata1, pdata2, pdata1],
bitpix=-32)
assert x.parnames == ['abc', 'xyz', 'abc']
assert (x.par('abc') == pdata1 * 2).all()
assert x[0].par('abc') == 2
# Test setting a parameter
x[0].setpar(0, 2)
assert x[0].par('abc') == 3
pytest.raises(ValueError, x[0].setpar, 'abc', 2)
x[0].setpar('abc', (2, 3))
assert x[0].par('abc') == 5
assert x.par('abc')[0] == 5
assert (x.par('abc')[1:] == pdata1[1:] * 2).all()
# Test round-trip
ghdu = fits.GroupsHDU(data=x)
ghdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
hdr = h[0].header
assert hdr['PCOUNT'] == 3
assert hdr['PTYPE1'] == 'abc'
assert hdr['PTYPE2'] == 'xyz'
assert hdr['PTYPE3'] == 'abc'
assert x.parnames == ['abc', 'xyz', 'abc']
assert x.dtype.names == ('abc', 'xyz', '_abc', 'DATA')
assert x.par('abc')[0] == 5
assert (x.par('abc')[1:] == pdata1[1:] * 2).all()
def test_group_bad_naxis(self):
"""Test file without NAXIS1 keyword.
Regression test for https://github.com/astropy/astropy/issues/9709
"""
testfile = os.path.join('invalid', 'group_invalid.fits')
with fits.open(self.data(testfile)) as hdul:
assert len(hdul) == 1
assert hdul[0].header['GROUPS']
assert hdul[0].data is None
|
ce61c27c8856d76b3286f9e8267ed78b295ac097cd326c6571323596c2af2002 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import numpy as np
from astropy.io import fits
from . import FitsTestCase
class TestNonstandardHdus(FitsTestCase):
def test_create_fitshdu(self):
"""
A round trip test of creating a FitsHDU, adding a FITS file to it,
writing the FitsHDU out as part of a new FITS file, and then reading
it and recovering the original FITS file.
"""
self._test_create_fitshdu(compression=False)
def test_create_fitshdu_with_compression(self):
"""Same as test_create_fitshdu but with gzip compression enabled."""
self._test_create_fitshdu(compression=True)
def test_create_fitshdu_from_filename(self):
"""Regression test on `FitsHDU.fromfile`"""
# Build up a simple test FITS file
a = np.arange(100)
phdu = fits.PrimaryHDU(data=a)
phdu.header['TEST1'] = 'A'
phdu.header['TEST2'] = 'B'
imghdu = fits.ImageHDU(data=a + 1)
phdu.header['TEST3'] = 'C'
phdu.header['TEST4'] = 'D'
hdul = fits.HDUList([phdu, imghdu])
hdul.writeto(self.temp('test.fits'))
fitshdu = fits.FitsHDU.fromfile(self.temp('test.fits'))
hdul2 = fitshdu.hdulist
assert len(hdul2) == 2
assert fits.FITSDiff(hdul, hdul2).identical
def _test_create_fitshdu(self, compression=False):
hdul_orig = fits.open(self.data('test0.fits'),
do_not_scale_image_data=True)
fitshdu = fits.FitsHDU.fromhdulist(hdul_orig, compress=compression)
# Just to be meta, let's append to the same hdulist that the fitshdu
        # encapsulates
hdul_orig.append(fitshdu)
hdul_orig.writeto(self.temp('tmp.fits'), overwrite=True)
del hdul_orig[-1]
hdul = fits.open(self.temp('tmp.fits'))
assert isinstance(hdul[-1], fits.FitsHDU)
wrapped = hdul[-1].hdulist
assert isinstance(wrapped, fits.HDUList)
assert hdul_orig.info(output=False) == wrapped.info(output=False)
assert (hdul[1].data == wrapped[1].data).all()
assert (hdul[2].data == wrapped[2].data).all()
assert (hdul[3].data == wrapped[3].data).all()
assert (hdul[4].data == wrapped[4].data).all()
hdul_orig.close()
hdul.close()
|
483fcc88038974a01251b89e684c816a68e1d35bd168447e1132ee3bbf917608 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import sys
import copy
import subprocess
import pytest
import numpy as np
from astropy.io.fits.hdu.base import _ValidHDU, _NonstandardHDU
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.io import fits
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filenames
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
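        # Per the checks above, fileinfo() reports header/data byte offsets
        # (hdrLoc, datLoc), the data span in bytes (datSpan), the file name
        # and mode, and a 'resized' flag; the None values for the freshly
        # inserted HDU presumably indicate it has not been written to the
        # file yet.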
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
with pytest.raises(ValueError):
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data('tb.fits')) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
with fits.open(self.data('tb.fits')) as hdul:
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
        Tests the use of a file-like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data('test0.fits')) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header['FOO'] = 'BAR'
with pytest.warns(AstropyUserWarning, match='mode is not supported') as w:
hdul.flush()
assert len(w) == 1
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
        Tests that well-formed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
        # Check that the well-formed values were left unchanged
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp('temp.fits'), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_bad_file_padding(self):
"""
Test warning when opening files with extra padding at the end.
See https://github.com/astropy/astropy/issues/4351
"""
# write some arbitrary data to a FITS file
fits.writeto(self.temp('temp.fits'), np.arange(100))
# append some arbitrary number of zeros to the end
with open(self.temp('temp.fits'), 'ab') as fobj:
fobj.write(b'\x00' * 1234)
with pytest.warns(
AstropyUserWarning,
match='Unexpected extra padding at the end of the file.'
) as w:
with fits.open(self.temp('temp.fits')) as fobj:
fobj.info()
assert len(w) == 1
@pytest.mark.filterwarnings('ignore:Unexpected extra padding')
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data('test0.fits'),
do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with pytest.warns(AstropyUserWarning, match='contains null bytes instead of spaces') as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f'TEST{idx}'] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
def test_update_resized_header(self):
"""
Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header[f'TEST{idx}'] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in get_pkg_data_filenames('data', pattern='*.fits'):
if sys.platform == 'win32' and filename.endswith('zerowidth.fits'):
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith(('variable_length_table.fits',
'theap-gap.fits')):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
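    # A small self-contained sketch (not part of the original suite) of the
    # same round trip without touching the data files: serialize an HDU to
    # bytes in memory and rebuild an HDUList from them.
    def _sketch_fromstring_roundtrip(self):
        hdu = fits.PrimaryHDU(data=np.arange(10))
        buf = io.BytesIO()
        hdu.writeto(buf)
        hdul = fits.HDUList.fromstring(buf.getvalue())
        assert (hdul[0].data == np.arange(10)).all()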
@pytest.mark.filterwarnings('ignore:Saving a backup')
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
# This emits warning that needs to be ignored at the
# pytest.mark.filterwarnings level.
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
        # Finally, without mmapping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
        # Finally, without mmapping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
        # Check that the behavior is correct when the upper end of the slice
        # is given explicitly.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
f.close()
def test_read_non_standard_hdu(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
buf.seek(0)
hdustr = buf.read()
hdustr = hdustr.replace(b'SIMPLE = T',
b'SIMPLE = F')
with open(filename, mode='wb') as f:
f.write(hdustr)
with fits.open(filename) as hdul:
assert isinstance(hdul[0], _NonstandardHDU)
assert hdul[0].header['FOO'] == 'BAR'
def test_proper_error_raised_on_non_fits_file(self):
filename = self.temp('not-fits.fits')
        with open(filename, mode='w', encoding='utf-8') as f:
f.write('Not a FITS file')
match = ('No SIMPLE card found, this file '
'does not appear to be a valid FITS file')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode='append')
with pytest.raises(OSError, match=match):
fits.open(filename, mode='update')
def test_proper_error_raised_on_invalid_fits_file(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
# write 80 additional bytes so the block will have the correct size
buf.write(b' '*80)
buf.seek(0)
buf.seek(80) # now remove the SIMPLE card
with open(filename, mode='wb') as f:
f.write(buf.read())
match = ('No SIMPLE card found, this file '
'does not appear to be a valid FITS file')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode='append')
with pytest.raises(OSError, match=match):
fits.open(filename, mode='update')
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header['FOO'] == 'BAR'
def test_warning_raised_on_non_standard_simple_card(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
# change the simple card format
buf.seek(0)
buf.write(b'SIMPLE = T ')
buf.seek(0)
with open(filename, mode='wb') as f:
f.write(buf.read())
match = ("Found a SIMPLE card but its format doesn't"
" respect the FITS Standard")
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode='append')
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode='update')
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header['FOO'] == 'BAR'
# change the simple card format
buf.seek(0)
buf.write(b'SIMPLE = T / This is a FITS file')
buf.seek(0)
with open(filename, mode='wb') as f:
f.write(buf.read())
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
filename = self.temp('not-fits-with-unicode.fits')
        with open(filename, mode='w', encoding='utf-8') as f:
f.write('Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match='No SIMPLE card found, this file '
'does not appear to be a valid FITS file'):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
# all included in one test file. See the discussion to the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised.
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(filename, ignore_missing_end=True)
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
# Skip due to https://github.com/astropy/astropy/issues/8916
@pytest.mark.skipif('sys.platform.startswith("win32")')
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
to ensure that an HDUList can be written to a stream.
"""
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp('test.fits'), 'wb') as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=fout) as p:
hdulist.writeto(p.stdin)
def test_output_verify(self):
hdul = fits.HDUList([fits.PrimaryHDU()])
hdul[0].header['FOOBAR'] = 42
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
data = f.read()
# create invalid card
data = data.replace(b'FOOBAR =', b'FOOBAR = ')
with open(self.temp('test2.fits'), 'wb') as f:
f.write(data)
with pytest.raises(VerifyError):
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
hdul[0].header['MORE'] = 'here'
with pytest.warns(VerifyWarning) as ww:
with fits.open(self.temp('test2.fits'), mode='update',
output_verify='fix+warn') as hdul:
hdul[0].header['MORE'] = 'here'
assert len(ww) == 6
msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)"
assert msg in str(ww[3].message)
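    # Note (not part of the original suite): output_verify accepts the
    # policies 'exception', 'ignore', 'fix', 'silentfix' and 'warn', plus
    # combined forms such as the 'fix+warn' used above, which repair what
    # they can and report remaining problems with the second policy.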
|
c47ee9c3902202e2a8b04bc163666c351a35882c00e4fb0e6880e05810e61805 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import pytest
from . import FitsTestCase
from astropy.io.fits.scripts import fitscheck
from astropy.io import fits
from astropy.utils.exceptions import AstropyUserWarning
from astropy import __version__ as version
class TestFitscheck(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitscheck.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitscheck.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitscheck {version}'
assert e.value.code == 0
def test_missing_file(self, capsys):
assert fitscheck.main(['missing.fits']) == 1
stdout, stderr = capsys.readouterr()
assert 'No such file or directory' in stderr
def test_valid_file(self, capsys):
testfile = self.data('checksum.fits')
assert fitscheck.main([testfile]) == 0
assert fitscheck.main([testfile, '--compliance']) == 0
assert fitscheck.main([testfile, '-v']) == 0
stdout, stderr = capsys.readouterr()
assert 'OK' in stderr
def test_remove_checksums(self, capsys):
self.copy_file('checksum.fits')
testfile = self.temp('checksum.fits')
assert fitscheck.main([testfile, '--checksum', 'remove']) == 1
assert fitscheck.main([testfile]) == 1
stdout, stderr = capsys.readouterr()
assert 'MISSING' in stderr
def test_no_checksums(self, capsys):
testfile = self.data('arange.fits')
assert fitscheck.main([testfile]) == 1
stdout, stderr = capsys.readouterr()
assert 'Checksum not found' in stderr
assert fitscheck.main([testfile, '--ignore-missing']) == 0
stdout, stderr = capsys.readouterr()
assert stderr == ''
def test_overwrite_invalid(self, caplog):
"""
Tests that invalid checksum or datasum are overwritten when the file is
saved.
"""
reffile = self.temp('ref.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul.writeto(reffile, checksum=True)
# replace checksums with wrong ones
testfile = self.temp('test.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul[0].header['DATASUM'] = '1 '
hdul[0].header['CHECKSUM'] = '8UgqATfo7TfoATfo'
hdul[1].header['DATASUM'] = '2349680925'
hdul[1].header['CHECKSUM'] = '11daD8bX98baA8bU'
hdul.writeto(testfile)
assert fitscheck.main([testfile]) == 1
assert re.match(r'BAD.*Checksum verification failed for HDU',
caplog.records[0].message)
caplog.clear()
with pytest.warns(AstropyUserWarning):
assert fitscheck.main([testfile, '--write', '--force']) == 1
assert re.match(r'BAD.*Checksum verification failed for HDU',
caplog.records[0].message)
caplog.clear()
# check that the file was fixed
assert fitscheck.main([testfile]) == 0
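# Illustrative command-line equivalents of the calls above (assuming the
# ``fitscheck`` console script installed with astropy):
#
#     fitscheck --checksum remove file.fits   # strip CHECKSUM/DATASUM
#     fitscheck --ignore-missing file.fits    # don't flag absent checksums
#     fitscheck --write --force file.fits     # rewrite bad checksums in place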
|
e8e1aec7e5fd0394733bd1f7f7d342c5a1474206b95653fed5471155c113d424 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import shutil
import stat
import tempfile
import time
from astropy.io import fits
class FitsTestCase:
def setup(self):
self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.temp_dir = tempfile.mkdtemp(prefix='fits-test-')
# Restore global settings to defaults
        # TODO: Replace this when there's a better way in the config API to
        # force config values to their defaults
fits.conf.enable_record_valued_keyword_cards = True
fits.conf.extension_name_case_sensitive = False
fits.conf.strip_header_whitespace = True
fits.conf.use_memmap = True
def teardown(self):
if hasattr(self, 'temp_dir') and os.path.exists(self.temp_dir):
tries = 3
while tries:
try:
shutil.rmtree(self.temp_dir)
break
except OSError:
# Probably couldn't delete the file because for whatever
# reason a handle to it is still open/hasn't been
# garbage-collected
time.sleep(0.5)
tries -= 1
fits.conf.reset('enable_record_valued_keyword_cards')
fits.conf.reset('extension_name_case_sensitive')
fits.conf.reset('strip_header_whitespace')
fits.conf.reset('use_memmap')
def copy_file(self, filename):
"""Copies a backup of a test data file to the temp dir and sets its
mode to writeable.
"""
shutil.copy(self.data(filename), self.temp(filename))
os.chmod(self.temp(filename), stat.S_IREAD | stat.S_IWRITE)
def data(self, filename):
"""Returns the path to a test data file."""
return os.path.join(self.data_dir, filename)
def temp(self, filename):
""" Returns the full path to a file in the test temp dir."""
return os.path.join(self.temp_dir, filename)
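# Typical use of this base class in the test modules above (illustrative):
# subclasses read reference files with self.data(...), write scratch output
# with self.temp(...), and call self.copy_file(...) before modifying a data
# file in place, so the originals in the data/ directory are never touched.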
|
c36147bf06a6d6ec8088ea226b73f807b3effda82cf14490372c6689a6c9099b | # Tests related to writing dask arrays to FITS files in an efficient way
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits import ImageHDU, PrimaryHDU
da = pytest.importorskip("dask.array")
@pytest.fixture
def dask_array_in_mem():
return da.random.uniform(-1000, 1000, (1322, 755)).rechunk((59, 55))
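# The rechunk above deliberately produces many small chunks so that writing
# exercises the chunked I/O path rather than a single contiguous store
# (an assumption about intent; the tests below only check the round trip).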
def test_construct_image_hdu(dask_array_in_mem):
hdu = ImageHDU(data=dask_array_in_mem)
assert isinstance(hdu.data, da.Array)
def test_construct_hdulist(dask_array_in_mem):
hdu = ImageHDU(data=dask_array_in_mem)
hdulist = fits.HDUList([hdu])
assert isinstance(hdulist[0].data, da.Array)
def test_save_primary_hdu(dask_array_in_mem, tmp_path):
# Saving a Primary HDU directly
filename = tmp_path / 'test.fits'
hdu = PrimaryHDU(data=dask_array_in_mem)
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
def test_save_image_hdu(dask_array_in_mem, tmp_path):
# Saving an image HDU directly
filename = tmp_path / 'test.fits'
hdu = ImageHDU(data=dask_array_in_mem)
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[1].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[1].data, dask_array_in_mem.compute())
def test_save_hdulist(dask_array_in_mem, tmp_path):
# Saving an HDUList
filename = tmp_path / 'test.fits'
hdu1 = PrimaryHDU(data=dask_array_in_mem)
hdu2 = ImageHDU(data=np.random.random((128, 128)))
hdu3 = ImageHDU(data=dask_array_in_mem * 2)
hdulist = fits.HDUList([hdu1, hdu2, hdu3])
assert isinstance(hdulist[0].data, da.Array)
hdulist.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
assert isinstance(hdulist_new[1].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[1].data, hdu2.data)
assert isinstance(hdulist_new[2].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[2].data, dask_array_in_mem.compute() * 2)
def test_long_header(dask_array_in_mem, tmp_path):
# Make sure things work correctly if there is a long header in the HDU.
filename = tmp_path / 'test.fits'
# NOTE: we deliberately set up a long header here rather than add the
# keys one by one to hdu.header as adding the header in one go used to
# cause issues, so this acts as a regression test.
header = fits.Header()
for index in range(2048):
header[f'KEY{index:x}'] = 0.
hdu = PrimaryHDU(data=dask_array_in_mem, header=header)
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert len(hdulist_new[0].header) == 2053
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
VALID_DTYPES = ('>i2', '<i2', '>i4', '<i4', '>i8', '<i8', '>f4', '<f4', '>f8', '<f8')
@pytest.mark.parametrize('dtype', VALID_DTYPES)
def test_dtypes(dask_array_in_mem, tmp_path, dtype):
filename = tmp_path / 'test.fits'
array = dask_array_in_mem.astype(dtype)
hdu = PrimaryHDU(data=array)
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, array.compute())
def test_scaled(dask_array_in_mem, tmp_path):
filename = tmp_path / 'test.fits'
hdu = PrimaryHDU(data=dask_array_in_mem)
hdu.scale('int32', bzero=-1000, bscale=1e-6)
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute(), atol=1e-5)
def test_scaled_minmax(dask_array_in_mem, tmp_path):
filename = tmp_path / 'test.fits'
hdu = PrimaryHDU(data=dask_array_in_mem)
    hdu.scale('int32', option='minmax')
hdu.writeto(filename)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute(), atol=1e-5)
def test_append(dask_array_in_mem, tmp_path):
# Test append mode
filename = tmp_path / 'test.fits'
fits.append(filename, dask_array_in_mem)
fits.append(filename, np.arange(10))
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
assert isinstance(hdulist_new[1].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[1].data, np.arange(10))
# @pytest.mark.parametrize('mode', ['rb+', 'ab', 'ab+', 'wb', 'wb+'])
@pytest.mark.parametrize('mode', ['wb', 'wb+'])
def test_file_handle(mode, dask_array_in_mem, tmp_path):
filename = tmp_path / 'test.fits'
hdu1 = PrimaryHDU(data=dask_array_in_mem)
hdu2 = ImageHDU(data=np.arange(10))
hdulist = fits.HDUList([hdu1, hdu2])
with filename.open(mode=mode) as fp:
hdulist.writeto(fp)
with fits.open(filename) as hdulist_new:
assert isinstance(hdulist_new[0].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
assert isinstance(hdulist_new[1].data, np.ndarray)
np.testing.assert_allclose(hdulist_new[1].data, np.arange(10))
|
2a7af12f824ee1947ca618c3e7e9fe6f86bbdb5ab7014d8c7d6858ca7066a7c1 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import pathlib
import warnings
import io
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astropy.io.fits import printdiff
from astropy.io.fits.column import ColumnAttribute
from astropy.io.fits.connect import REMOVE_KEYWORDS
from astropy.utils.exceptions import AstropyUserWarning
from astropy.io.fits.tests.test_table import _assert_attr_col
from . import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter('always', ResourceWarning)
_ = fits.getdata(self.data('test0.fits'))
_ = fits.getheader(self.data('test0.fits'))
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data('test0.fits'), 'rb')
_ = fits.getdata(f)
assert not f.closed
f.seek(0)
_ = fits.getheader(f)
assert not f.closed
f.close() # Close it now
def test_table_to_hdu(self):
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as"
" fits unit") as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp('test_table_to_hdu.fits')
hdu.writeto(filename, overwrite=True)
def test_masked_table_to_hdu(self):
i = np.ma.MaskedArray([1, 2, 3], mask=[True, False, False])
s = np.ma.MaskedArray(['a', 'b', 'c'], mask=[False, True, True])
c = np.ma.MaskedArray([2.3+1j, 4.5+0j, 6.7-1j], mask=[True, False, True])
f = np.ma.MaskedArray([2.3, 4.5, 6.7], mask=[True, False, True])
table = Table([i, s, c, f], names=['i', 's', 'c', 'f'])
# Check that FITS standard is used in replacing masked values.
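# (integer columns get a TNULLn sentinel, string columns are blanked, and
# float/complex columns are filled with NaN)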
hdu = fits.table_to_hdu(table)
assert isinstance(hdu, fits.BinTableHDU)
assert hdu.header['TNULL1'] == i.fill_value
assert_array_equal(hdu.data['i'], i.filled())
assert_array_equal(hdu.data['s'], s.filled(''))
assert_array_equal(hdu.data['c'], c.filled(np.nan))
assert_array_equal(hdu.data['c'].real, c.real.filled(np.nan))
assert_array_equal(hdu.data['c'].imag, c.imag.filled(np.nan))
assert_array_equal(hdu.data['c'], c.filled(complex(np.nan, np.nan)))
assert_array_equal(hdu.data['f'], f.filled(np.nan))
filename = self.temp('test_table_to_hdu.fits')
hdu.writeto(filename, overwrite=True)
def test_table_non_stringifyable_unit_to_hdu(self):
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = u.core.IrreducibleUnit("test")
with pytest.warns(AstropyUserWarning, match="The unit 'test' could not be saved") as w:
fits.table_to_hdu(table)
assert len(w) == 1
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table.meta['comments'] = ['This', 'is', 'a', 'comment']
hdu = fits.table_to_hdu(table)
assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment']
with pytest.raises(ValueError):
hdu.header.index('comments')
def test_table_to_hdu_filter_reserved(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9387
"""
diag = 'be ignored since it conflicts with a FITS reserved keyword'
ins_cards = {'EXPTIME': 32.1, 'XTENSION': 'NEWTABLE',
'NAXIS': 1, 'NAXIS1': 3, 'NAXIS2': 9,
'PCOUNT': 42, 'OBSERVER': 'Adams'}
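# XTENSION, NAXIS*, and PCOUNT are structural keywords that table_to_hdu
# computes itself, so the values above should be dropped with a warning;
# EXPTIME and OBSERVER are ordinary keywords and should be kept.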
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i4', 'U1', 'f8'])
table.meta.update(ins_cards)
with pytest.warns(AstropyUserWarning,
match=rf'Meta-data keyword \w+ will {diag}') as w:
hdu = fits.table_to_hdu(table)
# This relies on the warnings being raised in the order of the
# meta dict (note that the first and last card are legitimate keys)
assert len(w) == len(ins_cards) - 2
for i, key in enumerate(list(ins_cards)[1:-1]):
assert f'Meta-data keyword {key}' in str(w[i].message)
assert hdu.header.get('XTENSION') == 'BINTABLE'
assert hdu.header.get('NAXIS') == 2
assert hdu.header.get('NAXIS1') == 13
assert hdu.header.get('NAXIS2') == 3
assert hdu.header.get('PCOUNT') == 0
np.testing.assert_almost_equal(hdu.header.get('EXPTIME'), 3.21e1)
@pytest.mark.parametrize('card', REMOVE_KEYWORDS)
def test_table_to_hdu_warn_reserved(self, card):
"""
Test the warning for each keyword in ..connect.REMOVE_KEYWORDS, one by one
"""
diag = 'be ignored since it conflicts with a FITS reserved keyword'
res_cards = {'XTENSION': 'BINTABLE', 'BITPIX': 8,
'NAXIS': 2, 'NAXIS1': 12, 'NAXIS2': 3,
'PCOUNT': 0, 'GCOUNT': 1, 'TFIELDS': 2, 'THEAP': None}
ins_cards = {'XTENSION': 'TABLE', 'BITPIX': 16,
'NAXIS': 1, 'NAXIS1': 2, 'NAXIS2': 6,
'PCOUNT': 2, 'GCOUNT': 2, 'TFIELDS': 4, 'THEAP': 36}
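# res_cards are the values the created BINTABLE HDU should actually end up
# with; ins_cards are conflicting values injected via table.meta that should
# be discarded in favour of the computed ones.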
table = Table([[1.0, 2.0, 3.0], [2.3, 4.5, 6.7]],
names=['wavelength', 'flux'], dtype=['f8', 'f4'])
table.meta['ORIGIN'] = 'Min.Silly Walks'
table.meta[card] = ins_cards[card]
assert table.meta.get(card) != res_cards[card]
with pytest.warns(AstropyUserWarning,
match=f'Meta-data keyword {card} will {diag}'):
hdu = fits.table_to_hdu(table)
assert hdu.header.get(card) == res_cards[card]
assert hdu.header.get('ORIGIN') == 'Min.Silly Walks'
def test_table_to_hdu_filter_incompatible(self):
"""
Test removal of unsupported data types from header
"""
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i4', 'U1', 'f8'])
table.meta.update({'OBSDATE': '2001-05-26', 'RAMP': np.arange(5),
'TARGETS': {'PRIMARY': 1, 'SECONDAR': 3}})
with pytest.warns(AstropyUserWarning, match=r'Attribute \S+ of type '
r'.+ cannot be added to FITS Header - skipping'):
hdu = fits.table_to_hdu(table)
assert hdu.header.get('OBSDATE') == '2001-05-26'
assert 'RAMP' not in hdu.header
assert 'TARGETS' not in hdu.header
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5, ), dtype=[('x', float), ('y', int)])
h_in = fits.Header()
h_in['ANSWER'] = (42.0, 'LTU&E')
filename = self.temp('tabhdr42.fits')
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out['ANSWER'] == 42
def test_image_extension_update_header(self):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp('twoextension.fits')
hdus = [fits.PrimaryHDU(np.zeros((10, 10))),
fits.ImageHDU(np.zeros((10, 10)))]
fits.HDUList(hdus).writeto(filename)
fits.update(filename,
np.zeros((10, 10)),
header=fits.Header([('WHAT', 100)]),
ext=1)
h_out = fits.getheader(filename, ext=1)
assert h_out['WHAT'] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data('arange.fits'),
self.data('blank.fits')) is None
assert printdiff(self.data('arange.fits'),
self.data('blank.fits'), ext=0) is None
assert printdiff(self.data('o4sp040b0_raw.fits'),
self.data('o4sp040b0_raw.fits'),
extname='sci') is None
# This may seem weird, but see printdiff's implementation: we also need to
# test the case of an incorrect (nonexistent) second file
with pytest.raises(OSError):
printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')
# Test HDU object inputs
with fits.open(self.data('stddata.fits'), mode='readonly') as in1:
with fits.open(self.data('checksum.fits'), mode='readonly') as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
A simple test of the dump method.
Also regression test for https://github.com/astropy/astropy/issues/6937
"""
datastr = (
'" 1" "abc" " 3.70000007152557" " 0"\n'
'" 2" "xy " " 6.69999971389771" " 1"\n'
)
cdstr = (
'c1 1J I11 "" ""'
' -2147483647 "" "" \n'
'c2 3A A3 "" ""'
' "" "" "" \n'
'c3 1E G15.7 "" ""'
' "" 3 0.4 \n'
'c4 1L L6 "" ""'
' "" "" "" \n'
)
# copy fits file to the temp directory
self.copy_file('tb.fits')
# test without datafile
fits.tabledump(self.temp('tb.fits'))
assert os.path.isfile(self.temp('tb_1.txt'))
# test with datafile
fits.tabledump(self.temp('tb.fits'), datafile=self.temp('test_tb.txt'))
assert os.path.isfile(self.temp('test_tb.txt'))
# test with datafile and cdfile
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
fits.tabledump(self.temp('tb.fits'), datafile, cdfile)
assert os.path.isfile(datafile)
with open(datafile) as data:
assert data.read() == datastr
with open(cdfile) as coldefs:
assert coldefs.read() == cdstr
@pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits'])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
# copy fits file to the temp directory
self.copy_file(tablename)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
fits.tabledump(self.temp(tablename), datafile, cdfile, hfile)
new_tbhdu = fits.tableload(datafile, cdfile, hfile)
with fits.open(self.temp(tablename)) as hdul:
_assert_attr_col(new_tbhdu, hdul[1])
def test_append_filename(self):
"""
Test fits.append with a filename argument.
"""
data = np.arange(6)
testfile = self.temp('test_append_1.fits')
# Test case 1: creation of file
fits.append(testfile, data=data, checksum=True)
# Test case 2: append to existing file, with verify=True
# Also test that additional keyword can be passed to fitsopen
fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
# Test case 3: append to existing file, with verify=False
fits.append(testfile, data=data * 3, checksum=True, verify=False)
with fits.open(testfile, checksum=True) as hdu1:
np.testing.assert_array_equal(hdu1[0].data, data)
np.testing.assert_array_equal(hdu1[1].data, data * 2)
np.testing.assert_array_equal(hdu1[2].data, data * 3)
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_append_filehandle(self, tmpdir, mode):
"""
Test fits.append with a file handle argument.
"""
append_file = tmpdir.join('append.fits')
with append_file.open(mode) as handle:
fits.append(filename=handle, data=np.ones((4, 4)))
def test_append_with_header(self):
"""
Test fits.append with a fits Header, which triggers detection of the
HDU class. Regression test for
https://github.com/astropy/astropy/issues/8660
"""
testfile = self.temp('test_append_1.fits')
with fits.open(self.data('test0.fits')) as hdus:
for hdu in hdus:
fits.append(testfile, hdu.data, hdu.header, checksum=True)
with fits.open(testfile, checksum=True) as hdus:
assert len(hdus) == 5
def test_pathlib(self):
testfile = pathlib.Path(self.temp('test.fits'))
data = np.arange(10)
hdulist = fits.HDUList([fits.PrimaryHDU(data)])
hdulist.writeto(testfile)
with fits.open(testfile) as hdul:
np.testing.assert_array_equal(hdul[0].data, data)
def test_getdata_ext_given(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=2 * np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
for ext in [0, 1, 2]:
buf.seek(0)
data = fits.getdata(buf, ext=ext)
assert data[0, 0] == ext
def test_getdata_ext_given_nodata(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(IndexError, match="No data in HDU #2."):
fits.getdata(buf, ext=2)
def test_getdata_ext_not_given_with_data_in_primary(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 0
def test_getdata_ext_not_given_with_data_in_ext(self):
# tests fallback mechanism
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 1
def test_getdata_ext_not_given_nodata_any(self):
# tests exception raised when there is no data in either
# Primary HDU or first extension HDU
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError,
match="No data in either Primary or first extension HDUs."
):
fits.getdata(buf)
def test_getdata_ext_not_given_nodata_noext(self):
# tests exception raised when there is no data in the
# Primary HDU and there are no extension HDUs
prihdu = fits.PrimaryHDU(data=None)
hdulist = fits.HDUList([prihdu])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError,
match="No data in Primary HDU and no extension HDU found."
):
fits.getdata(buf)
|
a8d8694663af3b911ae8d56e092230bb690797b3cc7c3478ba67f187291046b6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import signal
import gzip
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io.fits import util
from astropy.io.fits.util import ignore_sigint, _rstrip_inplace
from astropy.utils.compat.optional_deps import HAS_PIL
if HAS_PIL:
from PIL import Image
from . import FitsTestCase
class TestUtils(FitsTestCase):
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_ignore_sigint(self):
@ignore_sigint
def test():
with pytest.warns(UserWarning) as w:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
# One more time, for good measure
os.kill(pid, signal.SIGINT)
assert len(w) == 2
assert (str(w[0].message) ==
'KeyboardInterrupt ignored until test is complete!')
pytest.raises(KeyboardInterrupt, test)
def test_realign_dtype(self):
"""
Tests a few corner-cases for numpy dtype creation.
These originally were the reason for having a realign_dtype hack.
"""
dt = np.dtype([('a', np.int32), ('b', np.int16)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0]})
assert dt2.itemsize == 4
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 1]})
assert dt2.itemsize == 4
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [1, 0]})
assert dt2.itemsize == 5
dt = np.dtype([('a', np.float64), ('b', np.int8), ('c', np.int8)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 0]})
assert dt2.itemsize == 8
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 1]})
assert dt2.itemsize == 8
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 27]})
assert dt2.itemsize == 28
class TestUtilMode(FitsTestCase):
"""
The high-level tests are partially covered by
test_core.TestConvenienceFunctions.test_fileobj_mode_guessing
but added some low-level tests as well.
"""
def test_mode_strings(self):
# A plain filename string means the file has not been opened yet, so the
# function should return None.
assert util.fileobj_mode('tmp1.fits') is None
@pytest.mark.skipif("not HAS_PIL")
def test_mode_pil_image(self):
img = np.random.randint(0, 255, (5, 5, 3)).astype(np.uint8)
result = Image.fromarray(img)
result.save(self.temp('test_simple.jpg'))
# PIL doesn't support append mode. So it will always use binary read.
with Image.open(self.temp('test_simple.jpg')) as fileobj:
assert util.fileobj_mode(fileobj) == 'rb'
def test_mode_gzip(self):
# Open a gzip file in every possible way (gzip is binary or "touch" only)
# and check that the mode is correctly identified.
# The lists consist of tuples: filenumber, given mode, identified mode
# The filenumber must be given because read expects the file to exist
# and x expects it to NOT exist.
num_mode_resmode = [(0, 'a', 'ab'), (0, 'ab', 'ab'),
(0, 'w', 'wb'), (0, 'wb', 'wb'),
(1, 'x', 'xb'),
(1, 'r', 'rb'), (1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp(f'test{num}.gz')
with gzip.GzipFile(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normal_buffering(self):
# Use the python IO with buffering parameter. Binary mode only:
# see "test_mode_gzip" for explanation of tuple meanings.
num_mode_resmode = [(0, 'ab', 'ab'),
(0, 'wb', 'wb'),
(1, 'xb', 'xb'),
(1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp(f'test1{num}.dat')
with open(filename, mode, buffering=0) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normal_no_buffering(self):
# Python IO without buffering
# see "test_mode_gzip" for explanation of tuple meanings.
num_mode_resmode = [(0, 'a', 'a'), (0, 'ab', 'ab'),
(0, 'w', 'w'), (0, 'wb', 'wb'),
(1, 'x', 'x'),
(1, 'r', 'r'), (1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp(f'test2{num}.dat')
with open(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normalization(self):
# Use the normal Python IO in append mode with all possible permutations
# of the "mode" letters.
# Each tuple gives a file name suffix, the given mode, and the function's
# expected return value. The file number is only there for consistency with
# the other test functions; append can deal with both existing and missing
# files.
for num, mode, res in [(0, 'a', 'a'),
(0, 'a+', 'a+'),
(0, 'ab', 'ab'),
(0, 'a+b', 'ab+'),
(0, 'ab+', 'ab+')]:
filename = self.temp(f'test3{num}.dat')
with open(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_rstrip_inplace():
# Incorrect type
s = np.array([1, 2, 3])
with pytest.raises(TypeError) as exc:
_rstrip_inplace(s)
assert exc.value.args[0] == 'This function can only be used on string arrays'
# Bytes array
s = np.array(['a ', ' b', ' c c '], dtype='S6')
_rstrip_inplace(s)
assert_equal(s, np.array(['a', ' b', ' c c'], dtype='S6'))
# Unicode array
s = np.array(['a ', ' b', ' c c '], dtype='U6')
_rstrip_inplace(s)
assert_equal(s, np.array(['a', ' b', ' c c'], dtype='U6'))
# 2-dimensional array
s = np.array([['a ', ' b'], [' c c ', ' a ']], dtype='S6')
_rstrip_inplace(s)
assert_equal(s, np.array([['a', ' b'], [' c c', ' a']], dtype='S6'))
# 3-dimensional array
s = np.repeat(' a a ', 24).reshape((2, 3, 4))
_rstrip_inplace(s)
assert_equal(s, ' a a')
# 3-dimensional non-contiguous array
s = np.repeat(' a a ', 1000).reshape((10, 10, 10))[:2, :3, :4]
_rstrip_inplace(s)
assert_equal(s, ' a a')
|
64c89579f3cc8dc8a7dbbfdfa8edbefa9c46f81002e32302f164b499c6f9d291 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io.fits.column import Column
from astropy.io.fits.diff import (FITSDiff, HeaderDiff, ImageDataDiff,
TableDataDiff, HDUDiff)
from astropy.io.fits.hdu import HDUList, PrimaryHDU, ImageHDU
from astropy.io.fits.hdu.base import NonstandardExtHDU
from astropy.io.fits.hdu.table import BinTableHDU
from astropy.io.fits.header import Header
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.io import fits
from . import FitsTestCase
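# Minimal stand-in for a nonstandard extension HDU: it only exposes a raw
# byte buffer, so HDUDiff falls back to a raw byte-level comparison
# (exercised by the rawdatadiff tests below).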
class DummyNonstandardExtHDU(NonstandardExtHDU):
def __init__(self, data=None, *args, **kwargs):
super().__init__(self, *args, **kwargs)
self._buffer = np.asarray(data).tobytes()
self._data_offset = 0
@property
def size(self):
return len(self._buffer)
class TestDiff(FitsTestCase):
def test_identical_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha.tostring(), hb.tostring()).identical
with pytest.raises(TypeError):
HeaderDiff(1, 2)
def test_slightly_different_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
assert not HeaderDiff(ha, hb).identical
def test_common_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C']
def test_different_keyword_count(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
del hb['B']
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_count == (3, 2)
# But make sure the common keywords are at least correct
assert diff.common_keywords == ['A', 'C']
def test_different_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
ha['E'] = (6, 'Comment')
ha['F'] = (7, 'Comment')
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keywords == (['E', 'F'], ['D'])
def test_different_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 4)]}
def test_different_keyword_comments(self):
ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')])
hb = ha.copy()
hb.comments['C'] = 'comment 2'
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_comments ==
{'C': [('comment 1', 'comment 2')]})
def test_different_keyword_values_with_duplicate(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('C', 4))
hb.append(('C', 5))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [None, (4, 5)]}
def test_asymmetric_duplicate_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('A', 2, 'comment 1'))
ha.append(('A', 3, 'comment 2'))
hb.append(('B', 4, 'comment 3'))
hb.append(('C', 5, 'comment 4'))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {}
assert (diff.diff_duplicate_keywords ==
{'A': (3, 1), 'B': (1, 2), 'C': (1, 2)})
report = diff.report()
assert ("Inconsistent duplicates of keyword 'A' :\n"
" Occurs 3 time(s) in a, 1 times in (b)") in report
def test_floating_point_rtol(self):
ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)])
hb = ha.copy()
hb['B'] = 2.00002
hb['C'] = 3.000002
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(2.00001, 2.00002)], 'C': [(3.000001, 3.000002)]})
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert diff.identical
def test_floating_point_atol(self):
ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.000001
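# A purely relative tolerance can never accept the change to C, whose
# original value is 0.0, so an absolute tolerance is needed for it.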
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, atol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, atol=1.1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6)
assert not diff.identical
def test_ignore_blanks(self):
with fits.conf.set_temp('strip_header_whitespace', False):
ha = Header([('A', 1), ('B', 2), ('C', 'A ')])
hb = ha.copy()
hb['C'] = 'A'
assert ha['C'] != hb['C']
diff = HeaderDiff(ha, hb)
# Trailing blanks are ignored by default
assert diff.identical
assert diff.diff_keyword_values == {}
# Don't ignore blanks
diff = HeaderDiff(ha, hb, ignore_blanks=False)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [('A ', 'A')]}
@pytest.mark.parametrize("differ", [HeaderDiff, HDUDiff, FITSDiff])
def test_ignore_blank_cards(self, differ):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152
Ignore blank cards.
"""
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)])
hc = ha.copy()
if differ is HeaderDiff:
hc.append()
hc.append()
else: # Ensure blanks are not at the end as they are stripped by HDUs
hc.add_blank(after=-2)
hc.add_blank(after=-2)
if differ in (HDUDiff, FITSDiff): # wrap it in a PrimaryHDU
ha, hb, hc = (PrimaryHDU(np.arange(10), h) for h in (ha, hb, hc))
hc_header = hc.header
if differ is FITSDiff: # wrap it in a HDUList
ha, hb, hc = (HDUList([h]) for h in (ha, hb, hc))
hc_header = hc[0].header
# We now have a header with interleaved blanks, and a header with end
# blanks, both of which should ignore the blanks
assert differ(ha, hb).identical
assert differ(ha, hc).identical
assert differ(hb, hc).identical
assert not differ(ha, hb, ignore_blank_cards=False).identical
assert not differ(ha, hc, ignore_blank_cards=False).identical
# Both hb and hc have the same number of blank cards; since order is
# currently ignored, these should still be identical even if blank
# cards are not ignored
assert differ(hb, hc, ignore_blank_cards=False).identical
if differ is HeaderDiff:
hc.append()
else: # Ensure blanks are not at the end as they are stripped by HDUs
hc_header.add_blank(after=-2)
# But now there are different numbers of blanks, so they should not be
# ignored:
assert not differ(hb, hc, ignore_blank_cards=False).identical
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy()
ha = Header([('A', 1), ('B', 2), ('C', 3)])
xa = np.array([(1.0, 1), (3.0, 4)], dtype=[('x', float), ('y', int)])
xb = np.array([(1.0, 2), (3.0, 5)], dtype=[('x', float), ('y', int)])
phdu = PrimaryHDU(header=ha)
ihdua = ImageHDU(data=a, name='SCI')
ihdub = ImageHDU(data=b, name='SCI')
bhdu1 = BinTableHDU(data=xa, name='ASDF')
bhdu2 = BinTableHDU(data=xb, name='ASDF')
hdula = HDUList([phdu, ihdua, bhdu1])
hdulb = HDUList([phdu, ihdub, bhdu2])
# ASDF extension should be different
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 2
# ASDF extension should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert diff.identical, diff.report()
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASD*'])
assert diff.identical, diff.report()
# SCI extension should be different
hdulb['SCI'].data += 1
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert not diff.identical
# SCI and ASDF extensions should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert diff.identical, diff.report()
# All EXTVER of SCI should be ignored
ihduc = ImageHDU(data=a, name='SCI', ver=2)
hdulb.append(ihduc)
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert not any(diff.diff_hdus), diff.report()
assert any(diff.diff_hdu_count), diff.report()
def test_ignore_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['B'] = 4
hb['C'] = 5
diff = HeaderDiff(ha, hb, ignore_keywords=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_keywords=['B'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
report = diff.report()
assert 'Keyword B has different values' not in report
assert 'Keyword C has different values' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_keywords=['b'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
def test_ignore_keyword_comments(self):
ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')])
hb = ha.copy()
hb.comments['B'] = 'D'
hb.comments['C'] = 'E'
diff = HeaderDiff(ha, hb, ignore_comments=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_comments=['B'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
report = diff.report()
assert 'Keyword B has different comments' not in report
assert 'Keyword C has different comments' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_comments=['b'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
def test_trivial_identical_images(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
diff = ImageDataDiff(ia, ib)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_relative_tolerance(self):
ia = np.ones((10, 10)) - 0.00001
ib = np.ones((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_absolute_tolerance(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert not diff.identical
assert diff.diff_total == 100
diff = ImageDataDiff(ia, ib, atol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5)
assert diff.identical
assert diff.diff_total == 0
def test_not_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6)
assert not diff.identical
assert diff.diff_total == 100
def test_identical_comp_image_hdus(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189
For this test we mostly just care that comparing two compressed images
does not crash, and returns the correct results. Two compressed images
will be considered identical if the decompressed data is the same.
Obviously we test whether or not the same compression was used by
looking for (or ignoring) header differences.
"""
data = np.arange(100.0).reshape(10, 10)
hdu = fits.CompImageHDU(data=data)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdula, \
fits.open(self.temp('test.fits')) as hdulb:
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_different_dimensions(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100) - 1
# Although ib could be reshaped into the same dimensions, for now the
# data is not compared anyways
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ((10, 10), (100,))
assert diff.diff_total == 0
report = diff.report()
assert 'Data dimensions differ' in report
assert 'a: 10 x 10' in report
assert 'b: 100' in report
assert 'No further data comparison performed.' in report
def test_different_pixels(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
ib[0, 0] = 10
ib[5, 5] = 20
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ()
assert diff.diff_total == 2
assert diff.diff_ratio == 0.02
assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))]
def test_identical_tables(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('D', format='J', bscale=2.0, array=[0, 1])
c5 = Column('E', format='A3', array=['abc', 'def'])
c6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
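# One column for each of the main binary-table format codes: logical (L),
# bit (X), int16 array (I), scaled int32 (J), string (A), float32 (E),
# float64 (D), complex64 (C), complex128 (M) and variable-length array (P).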
columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10]
ta = BinTableHDU.from_columns(columns)
tb = BinTableHDU.from_columns([c.copy() for c in columns])
diff = TableDataDiff(ta.data, tb.data)
assert diff.identical
assert len(diff.common_columns) == 10
assert diff.common_column_names == set('abcdefghij')
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_diff_empty_tables(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178
Ensure that diffing tables containing empty data doesn't crash.
"""
c1 = Column('D', format='J')
c2 = Column('E', format='J')
thdu = BinTableHDU.from_columns([c1, c2], nrows=0)
hdula = fits.HDUList([thdu])
hdulb = fits.HDUList([thdu])
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_ignore_table_fields(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('B', format='X', array=[[1], [0]])
c5 = Column('C', format='4I', dim='(2, 2)',
array=[[1, 2, 3, 4], [5, 6, 7, 8]])
ta = BinTableHDU.from_columns([c1, c2, c3])
tb = BinTableHDU.from_columns([c1, c4, c5])
diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C'])
assert diff.identical
# The only common column should be c1
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_different_table_field_names(self):
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([ca, cb])
tb = BinTableHDU.from_columns([ca, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_column_names == (['B'], ['C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert 'Extra column B of format L in a' in report
assert 'Extra column C of format L in b' in report
def test_different_table_field_counts(self):
"""
Test tables with some common columns, but different number of columns
overall.
"""
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([cb])
tb = BinTableHDU.from_columns([ca, cb, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == (1, 3)
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'b'}
assert diff.diff_column_names == ([], ['A', 'C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert ' Tables have different number of columns:' in report
assert ' a: 1\n b: 3' in report
def test_different_table_rows(self):
"""
Test tables that are otherwise identical but one has more rows than the
other.
"""
ca1 = Column('A', format='L', array=[True, False])
cb1 = Column('B', format='L', array=[True, False])
ca2 = Column('A', format='L', array=[True, False, True])
cb2 = Column('B', format='L', array=[True, False, True])
ta = BinTableHDU.from_columns([ca1, cb1])
tb = BinTableHDU.from_columns([ca2, cb2])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == ()
assert len(diff.common_columns) == 2
assert diff.diff_rows == (2, 3)
assert diff.diff_values == []
report = diff.report()
assert 'Table rows differ' in report
assert 'a: 2' in report
assert 'b: 3' in report
assert 'No further data comparison performed.' in report
def test_different_table_data(self):
"""
Test diffing table data on columns of several different data formats
and dimensions.
"""
ca1 = Column('A', format='L', array=[True, False])
ca2 = Column('B', format='X', array=[[0], [1]])
ca3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0])
ca5 = Column('E', format='A3', array=['abc', 'def'])
ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
cb1 = Column('A', format='L', array=[False, False])
cb2 = Column('B', format='X', array=[[0], [0]])
cb3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [5, 6, 7, 8]])
cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0])
cb5 = Column('E', format='A3', array=['abc', 'ghi'])
cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0])
cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0])
cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j])
cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j])
cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]])
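# Most columns differ from their 'a' counterparts in a single row; F, G and J
# differ in both rows, giving the 13 element differences counted below.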
ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7,
ca8, ca9, ca10])
tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7,
cb8, cb9, cb10])
diff = TableDataDiff(ta.data, tb.data, numdiffs=20)
assert not diff.identical
# The column definitions are the same, but not the column values
assert diff.diff_columns == ()
assert diff.diff_values[0] == (('A', 0), (True, False))
assert diff.diff_values[1] == (('B', 1), ([1], [0]))
assert diff.diff_values[2][0] == ('C', 1)
assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all()
assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all()
assert diff.diff_values[3] == (('D', 0), (0, 2.0))
assert diff.diff_values[4] == (('E', 1), ('def', 'ghi'))
assert diff.diff_values[5] == (('F', 0), (0.0, 1.0))
assert diff.diff_values[6] == (('F', 1), (1.0, 2.0))
assert diff.diff_values[7] == (('G', 0), (0.0, 2.0))
assert diff.diff_values[8] == (('G', 1), (1.0, 3.0))
assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j))
assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j))
assert diff.diff_values[11][0] == ('J', 0)
assert (diff.diff_values[11][1][0] == [0, 1]).all()
assert (diff.diff_values[11][1][1] == [1, 2]).all()
assert diff.diff_values[12][0] == ('J', 1)
assert (diff.diff_values[12][1][0] == [2, 3]).all()
assert (diff.diff_values[12][1][1] == [3, 4]).all()
assert diff.diff_total == 13
assert diff.diff_ratio == 0.65
report = diff.report()
assert ('Column A data differs in row 0:\n'
' a> True\n'
' b> False') in report
assert ('...and at 1 more indices.\n'
' Column D data differs in row 0:') in report
assert ('13 different table data element(s) found (65.00% different)'
in report)
assert report.count('more indices') == 1
def test_identical_files_basic(self):
"""Test identicality of two simple, extensionless files."""
a = np.arange(100).reshape(10, 10)
hdu = PrimaryHDU(data=a)
hdu.writeto(self.temp('testa.fits'))
hdu.writeto(self.temp('testb.fits'))
diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits'))
assert diff.identical
report = diff.report()
# Primary HDUs should contain no differences
assert 'Primary HDU' not in report
assert 'Extension HDU' not in report
assert 'No differences found.' in report
a = np.arange(10)
ehdu = ImageHDU(data=a)
diff = HDUDiff(ehdu, ehdu)
assert diff.identical
report = diff.report()
assert 'No differences found.' in report
def test_partially_identical_files1(self):
"""
Test files that have some identical HDUs but a different extension
count.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
hdula = HDUList([phdu, ehdu])
hdulb = HDUList([phdu, ehdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == (2, 3)
# diff_hdus should be empty, since the third extension in hdulb
# has nothing to compare against
assert diff.diff_hdus == []
report = diff.report()
assert 'Files contain different numbers of HDUs' in report
assert 'a: 2\n b: 3' in report
assert 'No differences found between common HDUs' in report
def test_partially_identical_files2(self):
"""
Test files that have some identical HDUs but one different HDU.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
ehdu2 = ImageHDU(data=(a + 1))
hdula = HDUList([phdu, ehdu, ehdu])
hdulb = HDUList([phdu, ehdu2, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == ()
assert len(diff.diff_hdus) == 1
assert diff.diff_hdus[0][0] == 1
hdudiff = diff.diff_hdus[0][1]
assert not hdudiff.identical
assert hdudiff.diff_extnames == ()
assert hdudiff.diff_extvers == ()
assert hdudiff.diff_extension_types == ()
assert hdudiff.diff_headers.identical
assert hdudiff.diff_data is not None
datadiff = hdudiff.diff_data
assert isinstance(datadiff, ImageDataDiff)
assert not datadiff.identical
assert datadiff.diff_dimensions == ()
assert (datadiff.diff_pixels ==
[((0, y), (y, y + 1)) for y in range(10)])
assert datadiff.diff_ratio == 1.0
assert datadiff.diff_total == 100
report = diff.report()
# Primary HDU and 2nd extension HDU should have no differences
assert 'Primary HDU' not in report
assert 'Extension HDU 2' not in report
assert 'Extension HDU 1' in report
assert 'Headers contain differences' not in report
assert 'Data contains differences' in report
for y in range(10):
assert f'Data differs at [{y + 1}, 1]' in report
assert '100 different pixels found (100.00% different).' in report
def test_partially_identical_files3(self):
"""
Test files that have some identical HDUs but a different extension
name.
"""
phdu = PrimaryHDU()
ehdu = ImageHDU(name='FOO')
hdula = HDUList([phdu, ehdu])
ehdu = BinTableHDU(name='BAR')
ehdu.header['EXTVER'] = 2
ehdu.header['EXTLEVEL'] = 3
hdulb = HDUList([phdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 1
hdu_diff = diff.diff_hdus[0][1]
assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE')
assert hdu_diff.diff_extnames == ('FOO', 'BAR')
assert hdu_diff.diff_extvers == (1, 2)
assert hdu_diff.diff_extlevels == (1, 3)
report = diff.report()
assert 'Extension types differ' in report
assert 'a: IMAGE\n b: BINTABLE' in report
assert 'Extension names differ' in report
assert 'a: FOO\n b: BAR' in report
assert 'Extension versions differ' in report
assert 'a: 1\n b: 2' in report
assert 'Extension levels differ' in report
assert 'a: 1\n b: 3' in report
def test_diff_nans(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204
"""
# First test some arrays that should be equivalent....
arr = np.empty((10, 10), dtype=np.float64)
arr[:5] = 1.0
arr[5:] = np.nan
arr2 = arr.copy()
table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)],
names=['cola', 'colb']).view(fits.FITS_rec)
table2 = table.copy()
assert ImageDataDiff(arr, arr2).identical
assert TableDataDiff(table, table2).identical
# Now let's introduce some differences, where there are nans and where
# there are not nans
arr2[0][0] = 2.0
arr2[5][0] = 2.0
table2[0][0] = 2.0
table2[1][1] = 2.0
diff = ImageDataDiff(arr, arr2)
assert not diff.identical
assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0))
assert diff.diff_pixels[1][0] == (5, 0)
assert np.isnan(diff.diff_pixels[1][1][0])
assert diff.diff_pixels[1][1][1] == 2.0
diff = TableDataDiff(table, table2)
assert not diff.identical
assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0))
assert diff.diff_values[1][0] == ('colb', 1)
assert np.isnan(diff.diff_values[1][1][0])
assert diff.diff_values[1][1][1] == 2.0
def test_file_output_from_path_string(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
with open(outpath) as fout:
assert fout.read() == report_as_string
def test_file_output_overwrite_safety(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
diffobj.report(fileobj=outpath)
def test_file_output_overwrite_success(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
diffobj.report(fileobj=outpath, overwrite=True)
with open(outpath) as fout:
assert fout.read() == report_as_string, (
"overwritten output file is not identical to report string")
def test_rawdatadiff_nodiff(self):
a = np.arange(100, dtype='uint8').reshape(10, 10)
b = a.copy()
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert diff.identical
report = diff.report()
assert 'No differences found.' in report
def test_rawdatadiff_dimsdiff(self):
a = np.arange(100, dtype='uint8') + 10
b = a[:80].copy()
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert not diff.identical
report = diff.report()
assert 'Data sizes differ:' in report
assert 'a: 100 bytes' in report
assert 'b: 80 bytes' in report
assert 'No further data comparison performed.' in report
def test_rawdatadiff_bytesdiff(self):
a = np.arange(100, dtype='uint8') + 10
b = a.copy()
changes = [(30, 200), (89, 170)]
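# Each change is (byte offset, new value); the original value at offset i is
# i + 10.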
for i, v in changes:
b[i] = v
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert not diff.identical
diff_bytes = diff.diff_data.diff_bytes
assert len(changes) == len(diff_bytes)
for j, (i, v) in enumerate(changes):
assert diff_bytes[j] == (i, (i+10, v))
report = diff.report()
assert 'Data contains differences:' in report
for i, _ in changes:
assert f'Data differs at byte {i}:' in report
assert '2 different bytes found (2.00% different).' in report
def test_fitsdiff_hdu_name(tmpdir):
"""Make sure diff report reports HDU name and ver if same in files"""
path1 = str(tmpdir.join("test1.fits"))
path2 = str(tmpdir.join("test2.fits"))
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI")])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1 (SCI, 1):" in diff.report()
def test_fitsdiff_no_hdu_name(tmpdir):
"""Make sure diff report doesn't report HDU name if not in files"""
path1 = str(tmpdir.join("test1.fits"))
path2 = str(tmpdir.join("test2.fits"))
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report()
def test_fitsdiff_with_names(tmpdir):
"""Make sure diff report doesn't report HDU name if not same in files"""
path1 = str(tmpdir.join("test1.fits"))
path2 = str(tmpdir.join("test2.fits"))
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI", ver=1)])
hdulist.writeto(path1)
hdulist[1].name = "ERR"
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report()
|
6967703a1d3052632b91d4eded0e0574cb0cb9ba08ffed8657446dfed46320b5 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import collections
import warnings
from io import StringIO, BytesIO
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from . import FitsTestCase
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([('a', 1), ('b', 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header['c'] = 100
assert 'c' not in copied_header
# and changing the copy should not change the original.
copied_header['a'] = 0
assert original_header['a'] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([('a', 10)])
new_header = fits.Header(original_header, copy=True)
original_header['a'] = 20
assert new_header['a'] == 10
new_header['a'] = 0
assert original_header['a'] == 20
def test_init_with_dict():
dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
# Create a list of tuples; each tuple consists of a letter and its index.
list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
# Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_keyword('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
@pytest.mark.parametrize('key', ['A', 'a'])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
assert key in header
assert header[key] == 'B'
assert header.get(key) == 'B'
assert header.index(key) == 0
assert header.comments[key] == 'C'
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert '' == c.keyword
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == 'ABC'
assert c.value == 'abc'
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card('nullstr', '')
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring('ABC = F')
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and
str(c) != _pad("FLOATNUM= -4.6737463674763E+032")):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
(1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card('abc', 9, 'abcde' * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (str(c) ==
"ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab")
c = fits.Card('abc', 'a' * 68, 'abcdefg')
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
pytest.raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
def test_add_history(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
assert repr(header['HISTORY']) == '1\n2\n3\n4'
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
# that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
assert repr(header['']) == '1\n2\n3\n\n\n4'
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
header[''] = 5
header[' '] = 6
assert header[''] == [0, 1, 2, 3, '', '', 4, 5, 6]
assert header[' '] == [0, 1, 2, 3, '', '', 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
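# FakeHeader provides just the keys()/__getitem__ interface that
# Header.update() needs from a mapping-like argument.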
header = fits.Header()
header.update({'FOO': ('BAR', 'BAZ')})
header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))
assert set(header.keys()) == {'FOO', 'A', 'B'}
assert header.comments['B'] == 'comment'
# test that comments are preserved
tmphdr = fits.Header()
tmphdr['HELLO'] = (1, 'this is a comment')
header.update(tmphdr)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}
assert header.comments['HELLO'] == 'this is a comment'
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}
assert set(header.values()) == {'BAR', 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update({'FOO': ('BAR', 'BAZ')})
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.comments['FOO'] == 'BAZ'
with pytest.raises(ValueError):
hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})
hdul.writeto(self.temp('test.fits'))
hdul.close()
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
hdul[0].header.add_comment(0, after='FOO')
assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad('HISTORY ' + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad('COMMENT ' + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value.")
assert (c.value == 'card has no comments. '
'/ text after slash is still part of the value.')
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == _pad(' / EXPOSURE INFORMATION')
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
# fixable non-FSC: if the card is not parsable, its value will be assumed
# to be a string and everything after the first slash will be the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes "
"/ let's also try the comment")
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment ")
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC     = ')
assert str(c) == _pad("ABC     =")
def test_undefined_keys_values(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['UNDEF'] = None
assert list(header.values()) == ['BAR', None]
assert list(header.items()) == [('FOO', 'BAR'), ('UNDEF', None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify('fix')
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
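# Values too long for one card are split across CONTINUE cards; a trailing
# '&' in each string chunk marks that it continues on the next card.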
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card('WHATEVER',
'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_'
'03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY'
'_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml')
assert (str(c) ==
"WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
h2['TEST'] = 'abcdefg' * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert (repr(header).splitlines() ==
[str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' "
"/ comments in line 1") +
_pad("continue 'continue with long string but without the "
"ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' "
"/ comments with ''. "))
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h['SVALUE'] = 'A' * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)
assert (str(c) ==
"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & ")
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(AstropyUserWarning, match='HIERARCH card will be created') as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings.
"""
filename = fits.util.get_testdata_filepath('compressed_image.fits')
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Ensures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert (str(header.cards[header.index('key.META_0')]) ==
str(header2.cards[header2.index('key.META_0')]))
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header['NAXIS']
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_card_delete(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
del header['hierarch abcdefghi']
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header['abcdefghi'] = 10
header['abcdefgh'] = 10
header['abcdefg'] = 10
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header['abcdefghij']
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header[2]
assert list(header.keys())[2] == 'abcdefg'.upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLAH BLAH': 'TESTA'})
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update({'HIERARCH BLAH BLAH': 'TESTB'})
assert len(w) == 0
assert header['BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH': 'TESTC'})
assert len(w) == 1
assert len(header) == 1
assert header['BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['blah blah'] == 'TESTD'
header.update({'blah blah': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['blah blah'] == 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({'BLAH BLAH BLAH': 'TESTA'})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
assert len(w) == 3
assert header['BLAH BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH BLAH': 'TESTC'})
assert len(w) == 4
assert header['BLAH BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah blah': 'TESTD'})
assert len(w) == 4
assert header['blah blah blah'] == 'TESTD'
header.update({'blah blah blah': 'TESTE'})
assert len(w) == 5
assert header['blah blah blah'] == 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLA BLA': 'TESTA'})
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 0
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({'BLA BLA': 'TESTA'})
assert len(w) == 1
assert msg in str(w[0].message)
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 2
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 3
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
header['FOO2'] = (None,)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
header['FOO2'] = (None, None)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
assert header.comments['FOO2'] == ''
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep='\n')
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header['UNDEF3'] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header['DEFINED'] == 42
assert header['UNDEF'] is None
assert header['UNDEF2'] is None
assert header['UNDEF3'] is None
assert header['UNDEF5'] is None
assert header['UNDEF6'] is None
# Assign an undefined value to a new card
header['UNDEF4'] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
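# Slicing returns a new Header containing only the selected cards; the
# original header is left unmodified.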
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
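# The wildcard pattern 'AB*' should match ABC and ABD but not DEF.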
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
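# Indexing with 'HISTORY' collects every HISTORY value, in header order.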
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] is None
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] is None
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
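# Duplicate keywords produce separate cards; individual copies can be
# addressed with (keyword, index) tuples.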
assert 'A' in header
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == [('A', 'B'), ('C', 'D')]
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.values(), ['B', 'D']):
assert a == b
def test_header_keys(self):
with fits.open(self.data('arange.fits')) as hdul:
assert (list(hdul[0].header) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
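# With no argument pop() removes the last card and returns its value; an
# integer argument removes the card at that position.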
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
pytest.raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
pytest.raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
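# For keywords already present, setdefault() returns the existing value and
# leaves the header unchanged; the default is only inserted for missing keys.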
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
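# Without unique=True the duplicate MYKEY from hdu2 is appended alongside
# the original value.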
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
pytest.raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header)[-1] == 'E'
assert header[-1] is None
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header)[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
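# insert() accepts a keyword name (or a (keyword, n) tuple for duplicates)
# as the position; with after=True the card goes after it instead of before.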
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
def test_remove(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# When keyword is present in the header it should be removed.
header.remove('C')
assert len(header) == 1
assert list(header) == ['A']
assert 'C' not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove('F')
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove('F', ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
header.remove('A', remove_all=True)
assert 'A' not in header
assert len(header) == 1
assert list(header) == ['C']
assert header[0] == 'D'
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert (repr(header.comments) ==
' A C\n'
' DEF H')
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
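# longval is 90 characters, so it wraps onto two HISTORY cards (72
# characters on the first card and the remaining 18 on the second).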
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
assert len(header) == 7
assert list(header)[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
header = fits.Header()
header.update({'FOO': 'BAR'})
header.update({'BAZ': 'QUX'})
longval = 'ABC' * 30
header.add_history(longval)
header.update({'FRED': 'BARNEY'})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.cards) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_totxtfile(self):
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.totextfile(self.temp('header.txt'))
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711
"""
filename = self.data('scale.fits')
hdr = fits.Header.fromfile(filename)
assert hdr['DATASET'] == '2MASS'
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, 'END', '')
pytest.raises(ValueError, header.append, 'END')
pytest.raises(ValueError, header.append, 'END', end=True)
pytest.raises(ValueError, header.insert, len(header), 'END')
pytest.raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += ' ' * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with pytest.warns(AstropyUserWarning, match=r"Unexpected bytes trailing "
r"END keyword: '\$%&%\^\*%\*'") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with pytest.warns(AstropyUserWarning, match="Missing padding to end of "
"the FITS block") as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h['FOO'] = 'BAR'
h['COMMENT'] = 'hello'
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
out = f.read()
out = out.replace(b'hello', 'héllo'.encode('latin1'))
out = out.replace(b'BAR', 'BÀR'.encode('latin1'))
with open(self.temp('test2.fits'), 'wb') as f2:
f2.write(out)
with pytest.warns(AstropyUserWarning, match="non-ASCII characters are "
"present in the FITS file") as w:
h = fits.getheader(self.temp('test2.fits'))
assert h['FOO'] == 'B?R'
assert h['COMMENT'] == 'h?llo'
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
with fits.open(self.temp('test.fits')) as hdul, \
pytest.warns(AstropyUserWarning) as w:
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
Python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = '\u30a8\u30ea\u30c3\u30af'
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h['FOO'] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = 'BAZ'
assert h['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', 'BAZ')
assert h['FOO'] == 'BAR'
assert h.comments['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
While test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, it should not be
possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set('TEST', b'Hello')
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
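# With stripping disabled, the trailing blanks stored in the card images
# are returned as part of the value.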
with fits.conf.set_temp('strip_header_whitespace', False):
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
pytest.raises(ValueError, header.set, 'Just som', 'foo')
pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST', float('nan'))
pytest.raises(ValueError, h.set, 'TEST', np.nan)
pytest.raises(ValueError, h.set, 'TEST', np.float32('nan'))
pytest.raises(ValueError, h.set, 'TEST', float('inf'))
pytest.raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# int -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, 'HISTORY', '\n')
pytest.raises(ValueError, h.set, 'HISTORY', '\nabc')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
        test_cards = [
            "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 ",
            "HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 ",
            "HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 ",
            "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
            "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
            "HISTORY r ' fred' with fv on 2013-11-04T16:59:14 ",
            "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
            "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
            "HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' ",
            "HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv ",
            "HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1",
            "HISTORY 1-04T16:59:14 "
        ]
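        # Card images without embedded newlines should verify cleanly; any
        # image containing '\n' must fail verification.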
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
pytest.raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
def test_long_commentary_card_appended_to_header(self):
"""
        If a HISTORY or COMMENT card with a too-long value is appended to a
        header with Header.append (as opposed to assigning to hdr['HISTORY']),
        it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = 'abc' * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(('history', value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == 'HISTORY' and val == value
# Try writing adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data('test0.fits'), 'rb') as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data('test0.fits'))
assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr['KEY2 '] = 2
hdr['KEY2 '] = 4
assert len(hdr) == 1
assert hdr['KEY2'] == 4
assert hdr['KEY2 '] == 4
def test_strip(self):
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr.strip()
assert set(hdr) == {'HISTORY', 'FOO'}
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr = hdr.copy(strip=True)
assert set(hdr) == {'HISTORY', 'FOO'}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring('KW = INF / Comment')
card.value = 'FIXED'
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card.verify('fix')
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card = fits.Card.fromstring('KW = INF')
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp('bogus.fits'))
with fits.open(self.temp('bogus.fits')) as hdul:
hdul[0].header['KW'] = -1
hdul.writeto(self.temp('bogus_fixed.fits'))
with fits.open(self.temp('bogus_fixed.fits')) as hdul:
assert hdul[0].header['KW'] == -1
def test_index_numpy_int(self):
header = fits.Header([('A', 'FOO'), ('B', 2), ('C', 'BAR')])
idx = np.int8(2)
assert header[idx] == 'BAR'
header[idx] = 'BAZ'
assert header[idx] == 'BAZ'
header.insert(idx, ('D', 42))
assert header[idx] == 42
header.add_comment('HELLO')
header.add_comment('WORLD')
assert header['COMMENT'][np.int64(1)] == 'WORLD'
header.append(('C', 'BAZBAZ'))
assert header[('C', np.int16(0))] == 'BAZ'
assert header[('C', np.uint32(1))] == 'BAZBAZ'
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
    These tests are derived primarily from the release notes for PyFITS 1.4 (in
    which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup(self):
super().setup()
self._test_header = fits.Header()
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
        Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
    def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header['DP1.AXIS.3']
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
        keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.AXIS.2'
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert ([str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert (str(h.cards[0]) ==
_pad("FOO = 'Date: 2012-09-19T13:58:53.756061'"))
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
pytest.raises(KeyError, lambda: h['FOO.AXIS'])
pytest.raises(KeyError, lambda: h['FOO.AXIS.'])
pytest.raises(KeyError, lambda: h['FOO.'])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
        # Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))
output = hf.parse(extensions=['AIPS FQ'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split('\n')) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1],
keywords=['EXTNAME', 'BITPIX'])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split('\n')) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=['NAXIS*'])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
        # Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data('test0.fits'))
assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2'])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data('comp.fits'))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1],
compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1],
compressed=True)
hf.close()
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data('zerowidth.fits')
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"])
assert len(mytable) == (len(fitsobj['AIPS FQ'].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header))
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=['AIPS FQ'])
assert np.all(mytable['filename'] == test_filename)
assert np.all(mytable['hdu'] == 'AIPS FQ')
assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['EXTNAME'])
assert len(mytable) == 1
assert mytable['hdu'][0] == "AIPS FQ"
assert mytable['keyword'][0] == "EXTNAME"
assert mytable['value'][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['DOES_NOT_EXIST'])
assert mytable is None
formatter.close()
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_hdu_writeto_mode(self, mode):
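        # Writing an HDU to an already-open file object should succeed for any
        # of the write/append modes parametrized above.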
with open(self.temp('mode.fits'), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ('no comment',)
return super().append(card, *args, **kwargs)
my_header = MyHeader((('a', 1., 'first'),
('b', 2., 'second'),
('c', 3.,)))
assert my_header.comments['a'] == 'first'
assert my_header.comments['b'] == 'second'
assert my_header.comments['c'] == 'no comment'
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments['b'] == 'second'
assert slice_.comments['c'] == 'no comment'
selection = my_header['c*']
assert type(selection) is MyHeader
assert selection.comments['c'] == 'no comment'
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments['b'] == 'second'
assert copy_.comments['c'] == 'no comment'
my_header.extend((('d', 4.),))
assert my_header.comments['d'] == 'no comment'
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
import os
from . import FitsTestCase
from astropy.io.fits.convenience import writeto
from astropy.io.fits.hdu import PrimaryHDU, hdulist
from astropy.io.fits import Header, ImageHDU, HDUList, FITSDiff
from astropy.io.fits.scripts import fitsdiff
from astropy import __version__ as version
class TestFITSDiff_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsdiff.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsdiff {version}'
assert e.value.code == 0
def test_noargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main([""])
assert e.value.code == 2
def test_oneargargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["file1"])
assert e.value.code == 2
def test_nodiff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
def test_onediff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
def test_manydiff(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a + 1
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 9',
' b> 10',
' ...',
' 100 different pixels found (100.00% different).']
numdiff = fitsdiff.main(['-n', '1', tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 0',
' b> 1',
' ...',
' 100 different pixels found (100.00% different).']
def test_outputfile(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(['-o', self.temp('diff.txt'), tmp_a, tmp_b])
assert numdiff == 1
with open(self.temp('diff.txt')) as f:
out = f.read()
assert out.splitlines()[-4:] == [
' Data differs at [1, 2]:',
' a> 10',
' b> 12',
' 1 different pixels found (1.00% different).']
def test_atol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b])
assert numdiff == 0
numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b])
assert numdiff == 1
def test_rtol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b])
assert numdiff == 0
def test_rtol_diff(self, capsys):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b])
assert numdiff == 1
out, err = capsys.readouterr()
assert out == f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
Primary HDU:
Data contains differences:
Data differs at [1, 2]:
a> 10.0
? ^
b> 11.0
? ^
1 different pixels found (1.00% different).
"""
assert err == ""
def test_wildcard(self):
tmp1 = self.temp("tmp_file1")
with pytest.raises(SystemExit) as e:
fitsdiff.main([tmp1+"*", "ACME"])
assert e.value.code == 2
def test_not_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.0, Absolute tolerance: 0.0
No differences found.
"""
assert err == ""
def test_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-q", tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_path(self, capsys):
os.mkdir(self.temp('sub/'))
tmp_b = self.temp('sub/ascii.fits')
tmp_g = self.temp('sub/group.fits')
tmp_h = self.data('group.fits')
with hdulist.fitsopen(tmp_h) as hdu_b:
hdu_b.writeto(tmp_g)
writeto(tmp_b, np.arange(100).reshape(10, 10))
# one modified file and a directory
assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1
assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1
# two directories
tmp_d = self.temp('sub/')
assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1
assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1
with pytest.warns(UserWarning, match=r"Field 'ORBPARM' has a repeat "
r"count of 0 in its format code"):
assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == 0
# no match
tmp_c = self.data('arange.fits')
fitsdiff.main([tmp_c, tmp_d])
out, err = capsys.readouterr()
assert "'arange.fits' has no match in" in err
# globbing
with pytest.warns(UserWarning, match=r"Field 'ORBPARM' has a repeat "
r"count of 0 in its format code"):
assert fitsdiff.main(["-q", self.data_dir+'/*.fits',
self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir+'/g*.fits', tmp_d]) == 0
# one file and a directory
tmp_f = self.data('tb.fits')
assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([('A', 1), ('B', 2), ('C', 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name='SCI')
ihdu_b = ImageHDU(data=b, name='SCI')
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
def test_ignore_hdus_report(self, capsys):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([('A', 1), ('B', 2), ('C', 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name='SCI')
ihdu_b = ImageHDU(data=b, name='SCI')
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
out, err = capsys.readouterr()
assert "testa.fits" in out
assert "testb.fits" in out
@pytest.mark.skip(reason="fails intentionally to show open files (see PR #10159)")
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files."""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert diff.identical, diff.report()
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import numpy as np
from astropy.io import fits
from . import FitsTestCase
def compare_arrays(arr1in, arr2in, verbose=False):
"""
Compare the values field-by-field in two sets of numpy arrays or
recarrays.
"""
arr1 = arr1in.view(np.ndarray)
arr2 = arr2in.view(np.ndarray)
nfail = 0
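    # Field names are matched case-insensitively: try the name as given, then
    # its lower- and upper-case variants before giving up.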
for n2 in arr2.dtype.names:
n1 = n2
if n1 not in arr1.dtype.names:
n1 = n1.lower()
if n1 not in arr1.dtype.names:
n1 = n1.upper()
if n1 not in arr1.dtype.names:
raise ValueError(f'field name {n2} not found in array 1')
if verbose:
sys.stdout.write(f" testing field: '{n2}'\n")
sys.stdout.write(' shape...........')
if arr2[n2].shape != arr1[n1].shape:
nfail += 1
if verbose:
sys.stdout.write('shapes differ\n')
else:
if verbose:
sys.stdout.write('OK\n')
sys.stdout.write(' elements........')
w, = np.where(arr1[n1].ravel() != arr2[n2].ravel())
if w.size > 0:
nfail += 1
if verbose:
sys.stdout.write(
f'\n {w.size} elements in field {n2} differ\n')
else:
if verbose:
sys.stdout.write('OK\n')
if nfail == 0:
if verbose:
sys.stdout.write('All tests passed\n')
return True
else:
if verbose:
sys.stdout.write(f'{nfail} differences found\n')
return False
def get_test_data(verbose=False):
st = np.zeros(3, [('f1', 'i4'), ('f2', 'S6'), ('f3', '>2f8')])
np.random.seed(35)
st['f1'] = [1, 3, 5]
st['f2'] = ['hello', 'world', 'byebye']
st['f3'] = np.random.random(st['f3'].shape)
return st
class TestStructured(FitsTestCase):
def test_structured(self):
fname = self.data('stddata.fits')
data1, h1 = fits.getdata(fname, ext=1, header=True)
data2, h2 = fits.getdata(fname, ext=2, header=True)
st = get_test_data()
outfile = self.temp('test.fits')
fits.writeto(outfile, data1, overwrite=True)
fits.append(outfile, data2)
fits.append(outfile, st)
assert st.dtype.isnative
assert np.all(st['f1'] == [1, 3, 5])
data1check, h1check = fits.getdata(outfile, ext=1, header=True)
data2check, h2check = fits.getdata(outfile, ext=2, header=True)
stcheck, sthcheck = fits.getdata(outfile, ext=3, header=True)
assert compare_arrays(data1, data1check, verbose=True)
assert compare_arrays(data2, data2check, verbose=True)
assert compare_arrays(st, stcheck, verbose=True)
# try reading with view
dataviewcheck, hviewcheck = fits.getdata(outfile, ext=2, header=True,
view=np.ndarray)
assert compare_arrays(data2, dataviewcheck, verbose=True)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits.compression import compress_hdu
from . import FitsTestCase
MAX_INT = np.iinfo(np.intc).max
MAX_LONG = np.iinfo(int).max
MAX_LONGLONG = np.iinfo(np.longlong).max
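# C-level integer limits used when converting header keywords; the overflow
# tests below push header values just past these bounds and expect
# OverflowError from compress_hdu.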
class TestCompressionFunction(FitsTestCase):
def test_wrong_argument_number(self):
with pytest.raises(TypeError):
compress_hdu(1, 2)
def test_unknown_compression_type(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header['ZCMPTYPE'] = 'fun'
with pytest.raises(ValueError) as exc:
compress_hdu(hdu)
assert 'Unrecognized compression type: fun' in str(exc.value)
def test_zbitpix_unknown(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header['ZBITPIX'] = 13
with pytest.raises(ValueError) as exc:
compress_hdu(hdu)
assert 'Invalid value for BITPIX: 13' in str(exc.value)
def test_data_none(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu.data = None
with pytest.raises(TypeError) as exc:
compress_hdu(hdu)
assert 'CompImageHDU.data must be a numpy.ndarray' in str(exc.value)
def test_missing_internal_header(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
del hdu._header
with pytest.raises(AttributeError) as exc:
compress_hdu(hdu)
assert '_header' in str(exc.value)
def test_invalid_tform(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header['TFORM1'] = 'TX'
with pytest.raises(RuntimeError) as exc:
compress_hdu(hdu)
assert 'TX' in str(exc.value) and 'TFORM' in str(exc.value)
def test_invalid_zdither(self):
hdu = fits.CompImageHDU(np.ones((10, 10)), quantize_method=1)
hdu._header['ZDITHER0'] = 'a'
with pytest.raises(TypeError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['ZNAXIS', 'ZBITPIX'])
def test_header_missing_keyword(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
del hdu._header[kw]
with pytest.raises(KeyError) as exc:
compress_hdu(hdu)
assert kw in str(exc.value)
@pytest.mark.parametrize('kw', ['ZNAXIS', 'ZVAL1', 'ZVAL2', 'ZBLANK', 'BLANK'])
def test_header_value_int_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_INT + 1
with pytest.raises(OverflowError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['ZTILE1', 'ZNAXIS1'])
def test_header_value_long_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_LONG + 1
with pytest.raises(OverflowError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['NAXIS1', 'NAXIS2', 'TNULL1', 'PCOUNT', 'THEAP'])
def test_header_value_longlong_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_LONGLONG + 1
with pytest.raises(OverflowError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['ZVAL3'])
def test_header_value_float_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = 1e300
with pytest.raises(OverflowError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['NAXIS1', 'NAXIS2', 'TFIELDS', 'PCOUNT'])
def test_header_value_negative(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = -1
with pytest.raises(ValueError) as exc:
compress_hdu(hdu)
assert f'{kw} should not be negative.' in str(exc.value)
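    # The FITS standard caps both the number of axes and the number of table
    # columns at 999, so ZNAXIS/TFIELDS values beyond that should be rejected.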
@pytest.mark.parametrize(
('kw', 'limit'),
[('ZNAXIS', 999),
('TFIELDS', 999)])
def test_header_value_exceeds_custom_limit(self, kw, limit):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = limit + 1
with pytest.raises(ValueError) as exc:
compress_hdu(hdu)
assert kw in str(exc.value)
@pytest.mark.parametrize('kw', ['TTYPE1', 'TFORM1', 'ZCMPTYPE', 'ZNAME1',
'ZQUANTIZ'])
def test_header_value_no_string(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = 1
with pytest.raises(TypeError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['TZERO1', 'TSCAL1'])
def test_header_value_no_double(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = '1'
with pytest.raises(TypeError):
compress_hdu(hdu)
@pytest.mark.parametrize('kw', ['ZSCALE', 'ZZERO'])
def test_header_value_no_double_int_image(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10), dtype=np.int32))
hdu._header[kw] = '1'
with pytest.raises(TypeError):
compress_hdu(hdu)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from . import FitsTestCase
from astropy.io.fits.scripts import fitsheader
from astropy import __version__ as version
class TestFITSheader_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsheader.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsheader.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsheader {version}'
assert e.value.code == 0
def test_file_exists(self, capsys):
fitsheader.main([self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'SIMPLE = T / conforms to FITS standard')
assert err == ''
def test_by_keyword(self, capsys):
fitsheader.main(['-k', 'NAXIS', self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'NAXIS = 3 / number of array dimensions')
fitsheader.main(['-k', 'NAXIS*', self.data('arange.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].startswith('NAXIS')
assert out[2].startswith('NAXIS1')
assert out[3].startswith('NAXIS2')
assert out[4].startswith('NAXIS3')
fitsheader.main(['-k', 'RANDOMKEY', self.data('arange.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING') and 'RANDOMKEY' in err
assert not err.startswith('ERROR')
def test_by_extension(self, capsys):
fitsheader.main(['-e', '1', self.data('test0.fits')])
out, err = capsys.readouterr()
assert len(out.splitlines()) == 62
fitsheader.main(['-e', '3', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith('BACKGRND= 312.')
fitsheader.main(['-e', '0', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
fitsheader.main(['-e', '3', '-k', 'FOO', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
def test_table(self, capsys):
fitsheader.main(['-t', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].endswith('| 1 | BACKGRND | 316.0 |')
assert out[2].endswith('| 2 | BACKGRND | 351.0 |')
assert out[3].endswith('| 3 | BACKGRND | 312.0 |')
assert out[4].endswith('| 4 | BACKGRND | 323.0 |')
fitsheader.main(['-t', '-e', '0', '-k', 'NAXIS',
self.data('arange.fits'),
self.data('ascii.fits'),
self.data('blank.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[1].endswith('| 0 | NAXIS | 3 |')
assert out[2].endswith('| 0 | NAXIS | 0 |')
assert out[3].endswith('| 0 | NAXIS | 2 |')
def test_fitsort(self, capsys):
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('test0.fits 49491.65366175 0.23')
assert out[3].endswith('test1.fits 49492.65366175 0.22')
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('test0.fits 49491.65366175 0.23')
fitsheader.main(['-f', '-k', 'NAXIS',
self.data('tdim.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[0].endswith('0:NAXIS 1:NAXIS 2:NAXIS 3:NAXIS 4:NAXIS')
assert out[2].endswith('tdim.fits 0 2 -- -- --')
assert out[3].endswith('test1.fits 0 2 2 2 2')
# check that files without required keyword are present
fitsheader.main(['-f', '-k', 'DATE-OBS',
self.data('table.fits'), self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('table.fits --')
assert out[3].endswith('test0.fits 19/05/94')
# check that COMMENT and HISTORY are excluded
fitsheader.main(['-e', '0', '-f', self.data('tb.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('tb.fits True 16 0 True '
'STScI-STSDAS/TABLES tb.fits 1')
def test_dotkeyword(self, capsys):
fitsheader.main(['-e', '0', '-k', 'ESO DET ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
fitsheader.main(['-e', '0', '-k', 'ESO.DET.ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
import gc
import pathlib
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io.fits.column import (_parse_tdisp_format, _fortran_to_python_format,
python_to_tdisp)
from astropy.io.fits import HDUList, PrimaryHDU, BinTableHDU, ImageHDU, table_to_hdu
from astropy.io import fits
from astropy import units as u
from astropy.table import Table, QTable, NdarrayMixin, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import (AstropyUserWarning,
AstropyDeprecationWarning)
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.time import Time
from astropy.units.quantity import QuantityInfo
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {name: col for name, col in mixin_cols.items()
if (isinstance(col, Time) and col.location.shape != ()
or isinstance(col, np.ndarray) and col.dtype.kind == 'O'
or isinstance(col, u.LogQuantity))}
mixin_cols = {name: col for name, col in mixin_cols.items()
if name not in unsupported_cols}
def equal_data(a, b):
for name in a.dtype.names:
if not np.all(a[name] == b[name]):
return False
return True
class TestSingleTable:
def setup_class(self):
self.data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
def test_simple(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmpdir):
filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['A'] = 1
t1.meta['B'] = 2.3
t1.meta['C'] = 'spam'
t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']
t1.meta['HISTORY'] = ['first', 'second', 'third']
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta, list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['ttype1'] = 'spam'
with pytest.warns(AstropyUserWarning, match='Meta-data keyword ttype1 '
'will be ignored since it conflicts with a FITS '
'reserved keyword') as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmpdir):
"""
Test that file type is recognized without extension
"""
filename = str(tmpdir.join('test_simple'))
t1 = Table(self.data)
t1.write(filename, overwrite=True, format='fits')
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_units(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_units.fits'))
t1 = table_type(self.data)
t1['a'].unit = u.m
t1['c'].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].unit == u.m
assert t2['c'].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmpdir):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = str(tmpdir.join('test_with_units.fits'))
unit = u.def_unit('bandpass_sol_lum')
t = QTable()
t['l'] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert 'bandpass_sol_lum' in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(u.UnitsWarning, match="'bandpass_sol_lum' did not parse") as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
assert np.all(t2['l'].value == t['l'].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3['l'].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({'bandpass_sol_lum': u.Lsun}):
t3 = QTable.read(filename)
assert t3['l'].unit is u.Lsun
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'Angstroms'
hdu.columns[2].unit = 'ergs/(cm.s.Angstroms)'
with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)):
t = table_type.read(hdu)
assert t['a'].unit == u.AA
assert t['c'].unit == u.erg/(u.cm*u.s*u.AA)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_format(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_format.fits'))
t1 = table_type(self.data)
t1['a'].format = '{:5d}'
t1['b'].format = '{:>20}'
t1['c'].format = '{:6.2f}'
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].format == '{:5d}'
assert t2['b'].format == '{:>20}'
assert t2['c'].format == '{:6.2f}'
def test_masked(self, tmpdir):
filename = str(tmpdir.join('test_masked.fits'))
t1 = Table(self.data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.mask['c'] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
@pytest.mark.parametrize('masked', [True, False])
def test_masked_nan(self, masked, tmpdir):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype='f4')
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2['b'].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_masked_serialize_data_mask(self, tmpdir):
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'])
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2['b'].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_read_from_fileobj(self, tmpdir):
filename = str(tmpdir.join('test_read_from_fileobj.fits'))
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, 'rb') as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'RADIANS'
hdu.columns[1].unit = 'spam'
hdu.columns[2].unit = 'millieggs'
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmpdir):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = str(tmpdir.join('test_nonstandard_units.fits'))
spam = u.def_unit('spam')
t = table_type()
t['a'] = [1., 2., 3.] * spam
with pytest.warns(AstropyUserWarning, match='spam') as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert ('cannot be recovered in reading. ') in str(w[0].message)
else:
assert 'lost to non-astropy fits readers' in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert 'TUNIT1' not in hdu.header
def test_memmap(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize('memmap', (False, True))
def test_character_as_bytes(self, tmpdir, memmap):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2['b'].dtype.kind == 'U'
assert t3['b'].dtype.kind == 'S'
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmpdir):
filename = str(tmpdir.join('test_oned_single_element.fits'))
table = Table({'x': [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read['x'].shape == (2, 1)
assert len(read['x'][0]) == 1
def test_write_append(self, tmpdir):
t = Table(self.data)
hdu = table_to_hdu(t)
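        # Helper: the file should now contain `expected` HDUs, and every table
        # HDU from index `start_from` onwards should match the one built above.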
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = str(tmpdir.join('test_write_append.fits'))
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmpdir):
t = Table(self.data)
filename = str(tmpdir.join('test_write_overwrite.fits'))
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmpdir):
filename = str(tmpdir.join('test_inexact_format_parse_on_read.fits'))
c1 = fits.Column(name='a', array=np.array([1, 2, np.nan]), format='E')
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_null_on_read(self, tmpdir):
filename = str(tmpdir.join('test_null_format_parse_on_read.fits'))
col = fits.Column(name='a', array=np.array([1, 2, 99, 60000], dtype='u2'), format='I', null=99, bzero=32768)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('p', float), ('q', float)])
self.data3 = np.array(list(zip([1, 2, 3, 4],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('A', int), ('B', float)])
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name='first')
hdu2 = BinTableHDU(self.data2, name='second')
hdu3 = ImageHDU(np.ones((3, 3)), name='third')
hdu4 = BinTableHDU(self.data3)
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
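# hdus: full set; hdusb: reordered so the first table is at hdu=2;
# hdus3: only table at hdu=2; hdus2: table at hdu=1 plus an image;
# hdus1: a single table.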
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings('always')
def test_read(self, tmpdir):
filename = str(tmpdir.join('test_read.fits'))
self.hdus.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = str(tmpdir.join('test_read_2.fits'))
self.hdusb.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_0.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_with_hdu_1(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_1.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_with_hdu_2(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_with_hdu_3(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_3.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_4.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_with_hdu_missing(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_1.fits'))
self.hdus1.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_with_hdu_warning(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_2.fits'))
self.hdus2.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_in_last_hdu(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_3.fits'))
self.hdus3.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first', None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize('hdu', [None, 1, 'first'])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename('data/tb.fits'))
assert np.all(t['c1'].mask == np.array([False, False]))
assert not hasattr(t['c2'], 'mask')
assert not hasattr(t['c3'], 'mask')
assert not hasattr(t['c4'], 'mask')
assert np.all(t['c1'].data == np.array([1, 2]))
assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t['c4'].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
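# A bare numeric string parses as a dimensionless unit with scale 1.2,
# which cannot be represented in a FITS column.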
t['a'].unit = '1.2'
with pytest.raises(UnitScaleError, match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\."):
t.write('t.fits', format='fits', overwrite=True)
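# _parse_tdisp_format splits a FITS TDISPn value into
# (format code, width, precision, exponential width).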
@pytest.mark.parametrize('tdisp_str, format_return',
[('EN10.5', ('EN', '10', '5', None)),
('F6.2', ('F', '6', '2', None)),
('B5.10', ('B', '5', '10', None)),
('E10.5E3', ('E', '10', '5', '3')),
('A21', ('A', '21', None, None))])
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize('tdisp_str, format_str_return',
[('G15.4E2', '{:15.4g}'),
('Z5.10', '{:5x}'),
('I6.5', '{:6d}'),
('L8', '{:>8}'),
('E20.7', '{:20.7e}')])
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize('fmt_str, tdisp_str',
[('{:3d}', 'I3'),
('3d', 'I3'),
('7.3f', 'F7.3'),
('{:>4}', 'A4'),
('{:7.4f}', 'F7.4'),
('%5.3g', 'G5.3'),
('%10s', 'A10'),
('%.4f', 'F13.4')])
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'
def test_bool_column(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
arr[::2] = np.False_
t = Table([arr])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert hdul[1].data['col0'].dtype == np.dtype('bool')
assert np.all(hdul[1].data['col0'] == arr)
def test_unicode_column(tmpdir):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(['a', 'b', 'cd'])])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])
assert hdul[1].header['TFORM1'] == '2A'
t2 = Table([np.array(['\N{SNOWMAN}'])])
with pytest.raises(UnicodeEncodeError):
t2.write(str(tmpdir.join('test.fits')), overwrite=True)
def test_unit_warnings_read_write(tmpdir):
filename = str(tmpdir.join('test_unit.fits'))
t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])
t1['a'].unit = 'm/s'
t1['b'].unit = 'not-a-unit'
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename('data/stddata.fits')
with pytest.warns(AstropyUserWarning, match=r'hdu= was not specified but '
r'multiple tables are present'):
t = Table.read(filename)
assert t.meta['comments'] == [
'',
' *** End of mandatory fields ***',
'',
'',
' *** Column names ***',
'',
'',
' *** Column formats ***',
''
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would
# fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
def test_fits_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='fits')
t2 = Table.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time)
else serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['HISTORY'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
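# A multi-line description and a large meta dict exercise serialization of
# these info attributes through the FITS round trip.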
t[name].info.description = 'my \n\n\n description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_fits_unsupported_mixin(name_col, tmpdir):
# Check that we actually fail in writing unsupported columns defined
# on top.
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
Table([col], names=[name]).write(filename, format='fits')
def test_info_attributes_with_no_mixins(tmpdir):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = str(tmpdir.join('test.fits'))
t = Table([[1.0, 2.0]])
t['col0'].description = 'hello' * 40
t['col0'].format = '{:8.4f}'
t['col0'].meta['a'] = {'b': 'c'}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2['col0'].description == 'hello' * 40
assert t2['col0'].format == '{:8.4f}'
assert t2['col0'].meta['a'] == {'b': 'c'}
@pytest.mark.parametrize('method', ['set_cols', 'names', 'class'])
def test_round_trip_masked_table_serialize_mask(tmpdir, method):
"""
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct.
"""
filename = str(tmpdir.join('test.fits'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
# MaskedColumn but no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for info about why we test a column with no masked elements.
t['d'] = [1, 2, 3]
if method == 'set_cols':
for col in t.itercols():
col.info.serialize_method['fits'] = 'data_mask'
t.write(filename)
elif method == 'names':
t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',
'c': 'data_mask', 'd': 'data_mask'})
elif method == 'class':
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
def test_meta_not_modified(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table(data=[Column([1, 2], 'a', description='spam')])
t.meta['comments'] = ['a', 'b']
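# Writing should serialize the column description without adding anything
# to the table's meta.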
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta['comments'] == ['a', 'b']
|
262595418347ad56669e90328f67fdf8e2daf3c9488cacfac7b6b77d32c22224 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import mmap
import errno
import os
import pathlib
import shutil
import urllib.request
import zipfile
from unittest.mock import patch
import pytest
import numpy as np
from . import FitsTestCase
from astropy.io.fits.convenience import _getext
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.file import _File, GZIP_MAGIC
from astropy.io import fits
from astropy.utils.data import conf
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.utils import data
from astropy.io.tests import safeio
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
if HAS_BZ2:
import bz2
class TestCore(FitsTestCase):
def test_missing_file(self):
with pytest.raises(OSError):
fits.open(self.temp('does-not-exist.fits'))
def test_naxisj_check(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist[1].header['NAXIS3'] = 500
assert 'NAXIS3' in hdulist[1].header
hdulist.verify('silentfix')
assert 'NAXIS3' not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
l = fits.HDUList() # noqa
n = np.zeros(3, dtype='i2')
n[0] = 1
n[1] = 60000
n[2] = 2
c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768,
array=n)
t = fits.BinTableHDU.from_columns([c])
l.append(p)
l.append(t)
l.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as p:
assert p[1].data[1]['foo'] == 60000.0
def test_fits_file_path_object(self):
"""
Testing when fits file is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(self.data('tdim.fits'))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data('tdim.fits')) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_pathlike_object(self):
"""
Testing when fits file is passed as os.PathLike object #11579.
"""
class TPath(os.PathLike):
def __init__(self, path):
self.path = path
def __fspath__(self):
return str(self.path)
fpath = TPath(self.data('tdim.fits'))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data('tdim.fits')) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_file_bytes_object(self):
"""
Testing when fits file is passed as bytes.
"""
with fits.open(self.data('tdim.fits').encode()) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data('tdim.fits')) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name='FOO', format='3J'))
p.add_col(fits.Column(name='BAR', format='1I'))
assert p.names == ['FOO', 'BAR']
p.del_col('FOO')
assert p.names == ['BAR']
def test_add_del_columns2(self):
hdulist = fits.open(self.data('tb.fits'))
table = hdulist[1]
assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
table.columns.del_col('c1')
assert table.data.dtype.names == ('c2', 'c3', 'c4')
assert table.columns.names == ['c2', 'c3', 'c4']
table.columns.del_col('c3')
assert table.data.dtype.names == ('c2', 'c4')
assert table.columns.names == ['c2', 'c4']
table.columns.add_col(fits.Column('foo', '3J'))
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
hdulist.writeto(self.temp('test.fits'), overwrite=True)
hdulist.close()
# NOTE: If you see a warning here, it might be related to
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp('test.fits')) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
header['BITPIX'] = (16, comment)
assert 'BITPIX' in header
assert header['BITPIX'] == 16
assert header.comments['BITPIX'] == comment
header.update(BITPIX=32)
assert header['BITPIX'] == 32
assert header.comments['BITPIX'] == ''
def test_set_card_value(self):
"""Similar to test_update_header_card(), but tests the the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
card = fits.Card.fromstring(f'BITPIX = 32 / {comment}')
header.append(card)
header['BITPIX'] = 32
assert 'BITPIX' in header
assert header['BITPIX'] == 32
assert header.cards[0].keyword == 'BITPIX'
assert header.cards[0].value == 32
assert header.cards[0].comment == comment
def test_uint(self):
filename = self.data('o4sp040b0_raw.fits')
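# With uint=False the BZERO-scaled int16 data comes back as float32;
# with uint=True it is interpreted as uint16 instead.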
with fits.open(filename, uint=False) as hdulist_f:
with fits.open(filename, uint=True) as hdulist_i:
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', [])
assert len(errs) == 1
assert 'TESTKW' in hdu.header
assert hdu.header['TESTKW'] == 'foo'
assert hdu.header.cards[-1].keyword == 'TESTKW'
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header['TESTKW'] = 'foo'
errs = hdu.req_cards('TESTKW', None,
lambda v: v == 'foo', 'foo', 'ignore', [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar',
'exception', [])
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix',
[])
assert hdu.header['TESTKW'] == 'bar'
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option='warn'):
errs = super()._verify(option)
hdu.req_cards('TESTKW', None, None, None, 'fix', errs)
return errs
@classmethod
def match_header(cls, header):
# Since creating this HDU class adds it to the registry we
# don't want the file reader to possibly think any actual
# HDU from a file should be handled by this class
return False
hdu = TestHDU(header=fits.Header())
with pytest.raises(fits.VerifyError):
hdu.verify('fix')
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header['XTENSION']
with pytest.raises(fits.VerifyError):
hdu.verify('exception')
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header['NAXIS']
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
hdu.verify('ignore')
# Make sure the error wasn't fixed either, silently or otherwise
assert 'NAXIS' not in hdu.header
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
with pytest.raises(ValueError):
hdu.verify('foobarbaz')
def test_errlist_basic(self):
# Just some tests to make sure that _ErrList is set up correctly.
# No arguments
error_list = fits.verify._ErrList()
assert error_list == []
# Some contents - these are not real errors, this just makes sure they
# are kept.
error_list = fits.verify._ErrList([1, 2, 3])
assert error_list == [1, 2, 3]
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
hdu.verify('silentfix+ignore')
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match='Illegal keyword name') as w:
hdu.verify('silentfix+warn')
assert len(w) == 4
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r'Illegal keyword name') as excinfo:
hdu.verify('silentfix+exception')
assert 'not upper case' not in str(excinfo.value)
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match='not upper case') as w:
hdu.verify('fix+ignore')
assert len(w) == 4
# fix+warn
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning) as w:
hdu.verify('fix+warn')
assert len(w) == 6
assert 'not upper case' in str(w[2].message)
assert 'Illegal keyword name' in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r'Illegal keyword name') as excinfo:
hdu.verify('fix+exception')
assert 'not upper case' in str(excinfo.value)
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
filename = self.data('test0.fits')
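# The extension can be given positionally as a number, a name, or a
# (name, ver) tuple, or via the ext/extname/extver keyword arguments;
# conflicting or redundant combinations should raise.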
hl, ext = _getext(filename, 'readonly', 1)
assert ext == 1
hl.close()
pytest.raises(ValueError, _getext, filename, 'readonly',
1, 2)
pytest.raises(ValueError, _getext, filename, 'readonly',
(1, 2))
pytest.raises(ValueError, _getext, filename, 'readonly',
'sci', 'sci')
pytest.raises(TypeError, _getext, filename, 'readonly',
1, 2, 3)
hl, ext = _getext(filename, 'readonly', ext=1)
assert ext == 1
hl.close()
hl, ext = _getext(filename, 'readonly', ext=('sci', 2))
assert ext == ('sci', 2)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
1, ext=('sci', 2), extver=3)
pytest.raises(TypeError, _getext, filename, 'readonly',
ext=('sci', 2), extver=3)
hl, ext = _getext(filename, 'readonly', 'sci')
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', 'sci', 1)
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', ('sci', 1))
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', 'sci',
extver=1, do_not_scale_image_data=True)
assert ext == ('sci', 1)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
'sci', ext=1)
pytest.raises(TypeError, _getext, filename, 'readonly',
'sci', 1, extver=2)
hl, ext = _getext(filename, 'readonly', extname='sci')
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', extname='sci',
extver=1)
assert ext == ('sci', 1)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
extver=1)
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.conf.extension_name_case_sensitive at
runtime works.
"""
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
with fits.conf.set_temp('extension_name_case_sensitive', True):
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'sCi'
assert hdu.header['EXTNAME'] == 'sCi'
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
infile = self.data('test0.fits')
outfile = self.temp('test.fits')
with open(infile, 'rb') as fin:
dat = fin.read()
offset = 0
with fits.open(infile) as hdul:
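# Byte length of the primary HDU: header size (the data offset) plus the
# padded data size.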
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header['TEST'] = 'TEST'
hdu.writeto(outfile)
with fits.open(outfile) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header['TEST'] == 'TEST'
assert hdu.data is None
with fits.open(infile) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header['SIMPLE'] = False
hdu.writeto(self.temp('test.fits'))
info = [(0, '', 1, 'NonstandardHDU', 5, (), '', '')]
with fits.open(self.temp('test.fits')) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == 'PRIMARY'
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert 'EXTNAME' not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = 'NOTREAL'
assert h1.name == 'NOTREAL'
assert h1.header.get('EXTNAME') == 'NOTREAL'
# Updating the EXTNAME in the header should update the .name
h1.header['EXTNAME'] = 'TOOREAL'
assert h1.name == 'TOOREAL'
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header['EXTNAME']
assert h1.name == 'PRIMARY'
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ''
assert 'EXTNAME' not in h2.header
h2.name = 'HELLO'
assert h2.name == 'HELLO'
assert h2.header.get('EXTNAME') == 'HELLO'
h2.header['EXTNAME'] = 'GOODBYE'
assert h2.name == 'GOODBYE'
def test_extver_extlevel(self):
"""Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
# EXTVER and EXTLEVEL work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert 'EXTVER' not in h1.header
assert 'EXTLEVEL' not in h1.header
h1.ver = 2
assert h1.header.get('EXTVER') == 2
h1.header['EXTVER'] = 3
assert h1.ver == 3
del h1.header['EXTVER']
assert h1.ver == 1
h1.level = 2
assert h1.header.get('EXTLEVEL') == 2
h1.header['EXTLEVEL'] = 3
assert h1.level == 3
del h1.header['EXTLEVEL']
assert h1.level == 1
pytest.raises(TypeError, setattr, h1, 'ver', 'FOO')
pytest.raises(TypeError, setattr, h1, 'level', 'BAR')
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
particular instance of this problem, though isn't unique to sys.stdout.
"""
with fits.open(self.data('test0.fits')) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header[f'TEST{idx}'] = 'test'
hdul1.writeto(self.temp('test1.fits'))
hdul1.writeto(self.temp('test2.fits'))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data('test0.fits')) as hdul2:
with fits.open(self.temp('test2.fits')) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul2, hdul)):
hdu2, hdu = hdus
if idx != 1:
assert hdu.header == hdu2.header
else:
assert (hdu2.header ==
hdu.header[:len(hdu2.header)])
assert np.all(hdu.data == hdu2.data)
class TestConvenienceFunctions(FitsTestCase):
def test_writeto(self):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
filename = self.temp('array.fits')
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
filename = self.temp('array.fits')
data = np.zeros((100, 100))
header = fits.Header()
header.set('CRPIX1', 1.)
fits.writeto(filename, data, header=header,
overwrite=True, output_verify='silentfix')
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert 'CRPIX1' in hdul[0].header
assert hdul[0].header['CRPIX1'] == 1.0
class TestFileFunctions(FitsTestCase):
"""
Tests various basic I/O operations, specifically in the
astropy.io.fits.file._File class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
OSError (and not some other arbitrary exception).
"""
with pytest.raises(OSError, match=r'No such file or directory'):
fits.open(self.temp('foobar.fits'))
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ('ostream', 'append'):
with fits.open(self.temp('foobar.fits'), mode=mode) as _:
pass
assert os.path.exists(self.temp('foobar.fits'))
os.remove(self.temp('foobar.fits'))
def test_open_file_handle(self):
# Make sure we can open a FITS file from an open file handle
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as _:
pass
with open(self.temp('temp.fits'), 'wb') as handle:
with fits.open(handle, mode='ostream') as _:
pass
# Opening without explicitly specifying binary mode should fail
with pytest.raises(ValueError):
with open(self.data('test0.fits')) as handle:
with fits.open(handle) as _:
pass
# All of these read modes should fail
for mode in ['r', 'rt']:
with pytest.raises(ValueError):
with open(self.data('test0.fits'), mode=mode) as handle:
with fits.open(handle) as _:
pass
# These update or write modes should fail as well
for mode in ['w', 'wt', 'w+', 'wt+', 'r+', 'rt+',
'a', 'at', 'a+', 'at+']:
with pytest.raises(ValueError):
with open(self.temp('temp.fits'), mode=mode) as handle:
with fits.open(handle) as _:
pass
def test_fits_file_handle_mode_combo(self):
# This should work fine since no mode is given
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as _:
pass
# This should work fine since the modes are compatible
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='readonly') as _:
pass
# This should not work since the modes conflict
with pytest.raises(ValueError):
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='ostream') as _:
pass
def test_open_from_url(self):
file_url = 'file:///' + self.data('test0.fits').lstrip('/')
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj) as _:
pass
# It will not be possible to write to a file that is from a URL object
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj, mode=mode) as _:
pass
@pytest.mark.remote_data(source='astropy')
def test_open_from_remote_url(self):
for dataurl in (conf.dataurl, conf.dataurl_mirror):
remote_url = f"{dataurl}/allsky/allsky_rosat.fits"
try:
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj) as fits_handle:
assert len(fits_handle) == 1
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
assert len(fits_handle) == 1
except (urllib.error.HTTPError, urllib.error.URLError):
continue
else:
break
else:
raise Exception("Could not download file")
def test_open_gzipped(self):
gzip_file = self._make_gzip_file()
with fits.open(gzip_file) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
with fits.open(gzip.GzipFile(gzip_file)) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_open_gzipped_from_handle(self):
with open(self._make_gzip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'gzip'
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with fits.open(self._make_gzip_file('test0.fz')) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.gz')) as hdul:
assert hdul[0].header == h.header
def test_fits_update_mode_gzip(self):
"""Test updating a GZipped FITS file"""
with fits.open(self._make_gzip_file('update.gz'), mode='update') as fits_handle:
hdu = fits.ImageHDU(data=[x for x in range(100)])
fits_handle.append(hdu)
with fits.open(self.temp('update.gz')) as new_handle:
assert len(new_handle) == 6
assert (new_handle[-1].data == [x for x in range(100)]).all()
def test_fits_append_mode_gzip(self):
"""Make sure that attempting to open an existing GZipped FITS file in
'append' mode raises an error"""
with pytest.raises(OSError):
with fits.open(self._make_gzip_file('append.gz'), mode='append') as _:
pass
@pytest.mark.skipif(not HAS_BZ2, reason='Python built without bz2 module')
def test_open_bzipped(self):
bzip_file = self._make_bzip2_file()
with fits.open(bzip_file) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
with fits.open(bz2.BZ2File(bzip_file)) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason='Python built without bz2 module')
def test_open_bzipped_from_handle(self):
with open(self._make_bzip2_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason='Python built without bz2 module')
def test_detect_bzipped(self):
"""Test detection of a bzip2 file when the extension is not .bz2."""
with fits.open(self._make_bzip2_file('test0.xx')) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason='Python built without bz2 module')
def test_writeto_bzip2_fileobj(self):
"""Test writing to a bz2.BZ2File file like object"""
fileobj = bz2.BZ2File(self.temp('test.fits.bz2'), 'w')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.bz2')) as hdul:
assert hdul[0].header == h.header
@pytest.mark.skipif(not HAS_BZ2, reason='Python built without bz2 module')
def test_writeto_bzip2_filename(self):
"""Test writing to a bzip2 file by name"""
filename = self.temp('testname.fits.bz2')
h = fits.PrimaryHDU()
h.writeto(filename)
with fits.open(self.temp('testname.fits.bz2')) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zip_file = self._make_zip_file()
with fits.open(zip_file) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
with fits.open(zipfile.ZipFile(zip_file)) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_open_zipped_from_handle(self):
with open(self._make_zip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename='test0.fz')
with fits.open(zf) as fits_handle:
assert len(fits_handle) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
zf = zipfile.ZipFile(zf, 'a')
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
def test_read_open_astropy_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2774
This tests reading from a ``GzipFile`` object from Astropy's
compatibility copy of the ``gzip`` module.
"""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_multiple_member_zipfile(self):
"""
Opening zip files containing more than one member files should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w')
zfile.write(self.data('test0.fits'))
zfile.writestr('foo', 'bar')
zfile.close()
with pytest.raises(OSError):
fits.open(zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data('test0.fits'), 'rb') as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data('test0.fits'), 'rb')
f.close()
with fits.open(f) as f2:
assert len(f2) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode='update') as h:
h[0].header['EXPFLAG'] = 'ABNORMAL'
h[1].data[0, 0] = 1
with fits.open(gf) as h:
# Just to make sure the update worked; if updates work
# normal writes should work too...
assert h[0].header['EXPFLAG'] == 'ABNORMAL'
assert h[1].data[0, 0] == 1
def test_write_read_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2794
Ensure files written through gzip are readable.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
hdu.writeto(self.temp('test.fits.gz'))
with open(self.temp('test.fits.gz'), 'rb') as f:
assert f.read(3) == GZIP_MAGIC
with fits.open(self.temp('test.fits.gz')) as hdul:
assert np.all(hdul[0].data == data)
@pytest.mark.parametrize('ext', ['gz', 'bz2', 'zip'])
def test_compressed_ext_but_not_compressed(self, ext):
testfile = self.temp(f'test0.fits.{ext}')
shutil.copy(self.data('test0.fits'), testfile)
with fits.open(testfile) as hdul:
assert len(hdul) == 5
fits.append(testfile, np.arange(5))
with fits.open(testfile) as hdul:
assert len(hdul) == 6
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = io.BytesIO()
with open(self.data('test0.fits'), 'rb') as f:
filelike.write(f.read())
filelike.seek(0)
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode='update')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified io.fits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file('test0.fits')
# Opening in text mode should outright fail
for mode in ('r', 'w', 'a'):
with open(self.temp('test0.fits'), mode) as f:
pytest.raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'readonly'
for mode in ('wb', 'ab'):
with open(self.temp('test0.fits'), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'wb+') as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'update'
with open(self.temp('test0.fits'), 'ab+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'append'
def test_mmap_unwriteable(self):
"""Regression test for https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise OSError('flush is broken on this platform')
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File.__dict__['_mmap_available']._cache.clear()
try:
self.copy_file('test0.fits')
with pytest.warns(AstropyUserWarning, match=r'mmap\.flush is unavailable') as w:
with fits.open(self.temp('test0.fits'), mode='update',
memmap=True) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
# Double check that writing without mmap still worked
with fits.open(self.temp('test0.fits')) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File.__dict__['_mmap_available']._cache.clear()
@pytest.mark.openfiles_ignore
def test_mmap_allocate_error(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1380
Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY.
"""
mmap_original = mmap.mmap
# We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which
# emulates an issue that an OSError is raised if the available address
# space is less than the size of the file even if memory mapping is used.
def mmap_patched(*args, **kwargs):
if kwargs.get('access') == mmap.ACCESS_COPY:
exc = OSError()
exc.errno = errno.ENOMEM
raise exc
else:
return mmap_original(*args, **kwargs)
with fits.open(self.data('test0.fits'), memmap=True) as hdulist:
with patch.object(mmap, 'mmap', side_effect=mmap_patched) as p:
with pytest.warns(AstropyUserWarning, match=r"Could not memory "
r"map array with mode='readonly'"):
data = hdulist[1].data
p.reset_mock()
assert not data.flags.writeable
def test_mmap_closing(self):
"""
Tests that the mmap reference is closed/removed when there aren't any
HDU data references left.
"""
if not _File._mmap_available:
pytest.xfail('not expected to work on platforms without mmap '
'support')
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
assert hdul._file._mmap is None
hdul[1].data
assert hdul._file._mmap is not None
del hdul[1].data
# Should be no more references to data in the file so close the
# mmap
assert hdul._file._mmap is None
hdul[1].data
hdul[2].data
del hdul[1].data
# hdul[2].data is still referenced so keep the mmap open
assert hdul._file._mmap is not None
del hdul[2].data
assert hdul._file._mmap is None
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
hdul[1].data
# When the only reference to the data is on the hdu object, and the
# hdulist it belongs to has been closed, the mmap should be closed as
# well
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
data = hdul[1].data
# also make a copy
data_copy = data.copy()
# The HDUList is closed; in fact, get rid of it completely
del hdul
# The data array should still work though...
assert np.all(data == data_copy)
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from file-like objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike:
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data('test0.fits'), 'rb') as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data('test0.fits')) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def test_write_bytesio_discontiguous(self):
"""
Regression test related to
https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
Demonstrates that writing an HDU containing a discontiguous Numpy array
should work properly.
"""
data = np.arange(100)[::3]
hdu = fits.PrimaryHDU(data=data)
fileobj = io.BytesIO()
hdu.writeto(fileobj)
fileobj.seek(0)
with fits.open(fileobj) as h:
assert np.all(h[0].data == data)
def test_write_bytesio(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2463
Test against `io.BytesIO`. `io.StringIO` is not supported.
"""
self._test_write_string_bytes_io(io.BytesIO())
@pytest.mark.skipif('sys.platform.startswith("win32")')
def test_filename_with_colon(self):
"""
Test reading and writing a file with a colon in the filename.
Regression test for https://github.com/astropy/astropy/issues/3122
"""
# Skip on Windows since colons in filenames makes NTFS sad.
filename = 'APEXHET.2014-04-01T15:18:01.000.fits'
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert np.all(hdul[0].data == hdu.data)
def test_writeto_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to write an hdulist
to a full disk.
"""
def _writeto(self, array):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
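# Simulate a full disk: the free-space check reports 0 bytes and the
# low-level write raises an OSError.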
with pytest.raises(OSError) as exc:
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
n = np.arange(0, 1000, dtype='int64')
hdu = fits.PrimaryHDU(n)
hdulist = fits.HDUList(hdu)
filename = self.temp('test.fits')
with open(filename, mode='wb') as fileobj:
hdulist.writeto(fileobj)
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def test_flush_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to update an hdulist
to a full disk.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.writeto(filename)
def _writedata(self, fileobj):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata)
monkeypatch.setattr(data, "get_free_space_in_dir",
get_free_space_in_dir)
with pytest.raises(OSError) as exc:
with fits.open(filename, mode='update') as hdul:
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def _test_write_string_bytes_io(self, fileobj):
"""
Implemented for both test_write_stringio and test_write_bytesio.
"""
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(fileobj)
hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
assert FITSDiff(hdul, hdul2).identical
def _make_gzip_file(self, filename='test0.fits.gz'):
gzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
gz = gzip.open(gzfile, 'wb')
gz.write(f.read())
gz.close()
return gzfile
def test_write_overwrite(self):
filename = self.temp('test_overwrite.fits')
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdu.writeto(filename)
hdu.writeto(filename, overwrite=True)
def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'):
zfile = zipfile.ZipFile(self.temp(filename), 'w')
zfile.write(self.data('test0.fits'))
zfile.close()
return zfile.filename
def _make_bzip2_file(self, filename='test0.fits.bz2'):
bzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
bz = bz2.BZ2File(bzfile, 'w')
bz.write(f.read())
bz.close()
return bzfile
def test_simulateonly(self):
"""Write to None simulates writing."""
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(None)
hdul[0].writeto(None)
hdul[0].header.tofile(None)
def test_bintablehdu_zero_bytes(self):
"""Make sure we don't have any zero-byte writes in BinTableHDU"""
bright = np.rec.array([(1, 'Sirius', -1.45, 'A1V'),
(2, 'Canopus', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10', names='order,name,mag,Sp')
hdu_non_zero = fits.BinTableHDU(bright)
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp('bright.fits'), mode='wb'))
hdu_non_zero.writeto(fh)
fh.close()
def test_primaryhdu_zero_bytes(self):
"""
Make sure we don't have any zero-byte writes from an ImageHDU
(or other) of `size % BLOCK_SIZE == 0`
"""
hdu_img_2880 = fits.PrimaryHDU(data=np.arange(720, dtype='i4'))
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp('image.fits'), mode='wb'))
hdu_img_2880.writeto(fh)
fh.close()
class TestStreamingFunctions(FitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self):
shdu = self._make_streaming_hdu(self.temp('new.fits'))
assert isinstance(shdu.size, int)
assert shdu.size == 100
shdu.close()
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected.
"""
with pytest.raises(ValueError):
with open(self.temp('new.fits'), 'wb') as f:
header = fits.Header()
fits.StreamingHDU(f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
with fits.open(self.temp('new.fits')) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = io.BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self, capsys):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header['EXTNAME'] = 12345678
hdul = fits.HDUList([phdu, ihdu])
filename = self.temp('temp.fits')
pytest.raises(fits.VerifyError, hdul.writeto, filename,
output_verify='exception')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
hdul.writeto(filename, output_verify='fix')
with fits.open(filename):
assert hdul[1].name == '12345678'
assert hdul[1].header['EXTNAME'] == '12345678'
hdul.close()
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd['SIMPLE'] = (True, 'conforms to FITS standard')
hd['BITPIX'] = (32, 'array data type')
hd['NAXIS'] = (2, 'number of array dimensions')
hd['NAXIS1'] = 5
hd['NAXIS2'] = 5
hd['EXTEND'] = True
return fits.StreamingHDU(fileobj, hd)
def test_blank_ignore(self):
with fits.open(self.data('blank.fits'), ignore_blank=True) as f:
assert f[0].data.flat[0] == 2
def test_error_if_memmap_impossible(self):
pth = self.data('blank.fits')
with fits.open(pth, memmap=True) as hdul:
with pytest.raises(ValueError):
hdul[0].data
# However, it should not fail if do_not_scale_image_data was used:
# See https://github.com/astropy/astropy/issues/3766
with fits.open(pth, memmap=True, do_not_scale_image_data=True) as hdul:
hdul[0].data # Just make sure it doesn't crash
|
33ba596d4aa13b5f857a4a0736d7724432c2282caa44756bb8184166282a8650 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from . import FitsTestCase
from astropy.io.fits.fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword
from astropy.coordinates import EarthLocation
from astropy.io import fits
from astropy.table import Table, QTable, Column
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.utils.exceptions import AstropyUserWarning
class TestFitsTime(FitsTestCase):
def setup_class(self):
self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'])
self.time_3d = np.array([[[1, 2], [1, 3], [3, 4]]])
def test_is_time_column_keyword(self):
# Time column keyword without column number
assert is_time_column_keyword('TRPOS') is False
# Global time column keyword
assert is_time_column_keyword('TIMESYS') is False
# Valid time column keyword
assert is_time_column_keyword('TRPOS12') is True
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_loc(self, table_types):
"""
Test all the unusual conditions for locations of ``Time``
columns in a ``Table``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t['b'] = Time(self.time, format='isot', scale='tt')
# Check that vectorized location is stored using Green Bank convention
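        # (i.e. the per-row OBSGEO-X/Y/Z values are written as table columns
        # rather than as single scalar header keywords)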
t['a'].location = EarthLocation([1., 2.], [2., 3.], [3., 4.],
unit='Mm')
with pytest.warns(AstropyUserWarning, match=r'Time Column "b" has no '
r'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
with pytest.warns(AstropyUserWarning, match=r'Time Column "b" has no '
r'specified location, but global Time Position is present'):
t.write(self.temp('time.fits'), format='fits', overwrite=True)
# Check that a blank value for the "TRPOSn" keyword is not generated
hdr = fits.getheader(self.temp('time.fits'), 1)
assert hdr.get('TRPOS2', None) is None
with pytest.warns(AstropyUserWarning, match=r'Time column reference position '
r'"TRPOSn" is not specified. The default value for it is '
r'"TOPOCENTER", and the observatory position has been specified.'):
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
assert tm['b'].location == t['b'].location
# Check that multiple Time columns with different locations raise an exception
t['a'].location = EarthLocation(1, 2, 3)
t['b'].location = EarthLocation(2, 3, 4)
with pytest.raises(ValueError) as err:
table, hdr = time_to_fits(t)
assert 'Multiple Time Columns with different geocentric' in str(err.value)
# Check that Time column with no location specified will assume global location
t['b'].location = None
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no specified '
'location, but global Time Position is present') as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
# Check that multiple Time columns with same location can be written
t['b'].location = EarthLocation(1, 2, 3)
table, hdr = time_to_fits(t)
# Check compatibility of Time Scales and Reference Positions
for scale in BARYCENTRIC_SCALES:
t.replace_column('a', getattr(t['a'], scale))
with pytest.warns(AstropyUserWarning, match='Earth Location "TOPOCENTER" '
'for Time Column') as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
# Check that multidimensional vectorized location (ndim=3) is stored
# using Green Bank convention.
t = table_types()
location = EarthLocation([[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]], unit='Mm')
t['a'] = Time(self.time_3d, format='jd', location=location)
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
# Check that singular location with ndim>1 can be written
t['a'] = Time(self.time, location=EarthLocation([[[1.]]], [[[2.]]],
[[[3.]]], unit='Mm'))
table, hdr = time_to_fits(t)
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert tm['a'].location == t['a'].location
@pytest.mark.parametrize('mask', (False, [True, False]))
@pytest.mark.parametrize('serialize_method', ('jd1_jd2', 'formatted_value'))
def test_time_to_fits_serialize_method(self, serialize_method, mask):
"""
Test the data returned by ``time_to_fits`` for masked values.
"""
a = Time(np.ma.MaskedArray(self.time, mask=mask))
b = Time(np.ma.MaskedArray([[1, 2], [3, 4]], mask=np.broadcast_to(mask, (2, 2))),
format='cxcsec')
assert b.masked is a.masked is (mask is not False)
t = QTable([a, b], names=['a', 'b'])
t.write(self.temp('time.fits'), format='fits', overwrite=True,
serialize_method=serialize_method)
tm = QTable.read(self.temp('time.fits'), format='fits', astropy_native=True)
if mask is not False:
assert np.all(tm['a'].mask == a.mask)
assert np.all(tm['b'].mask == b.mask)
if serialize_method == 'jd1_jd2':
assert isinstance(tm['a'], Time) and np.all(tm['a'] == a)
assert isinstance(tm['b'], Time) and np.all(tm['b'] == b)
else:
# TODO: Should 'formatted_value' not become a Time too,
# at least if read with astropy_native=True?
assert isinstance(tm['a'], Column) and np.all(tm['a'] == a.value)
assert isinstance(tm['b'], Column) and np.all(tm['b'] == b.value)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_header(self, table_types):
"""
Test the header and metadata returned by ``time_to_fits``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(-2446354,
4237210, 4077985, unit='m'))
t['b'] = Time([1, 2], format='cxcsec', scale='tt')
ideal_col_hdr = {'OBSGEO-X': t['a'].location.x.value,
'OBSGEO-Y': t['a'].location.y.value,
'OBSGEO-Z': t['a'].location.z.value}
with pytest.warns(AstropyUserWarning, match=r'Time Column "b" has no '
r'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
# Check the global time keywords in hdr
for key, value in GLOBAL_TIME_INFO.items():
assert hdr[key] == value[0]
assert hdr.comments[key] == value[1]
hdr.remove(key)
for key, value in ideal_col_hdr.items():
assert hdr[key] == value
hdr.remove(key)
# Check the column-specific time metadata
coord_info = table.meta['__coordinate_columns__']
for colname in coord_info:
assert coord_info[colname]['coord_type'] == t[colname].scale.upper()
assert coord_info[colname]['coord_unit'] == 'd'
assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'
assert coord_info['b']['time_ref_pos'] == None # noqa
assert len(hdr) == 0
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_fits_to_time_meta(self, table_types):
"""
Test that the relevant global time metadata is read into
``Table.meta`` as ``Time``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t.meta['DATE'] = '1999-01-01T00:00:00'
t.meta['MJD-OBS'] = 56670
        # Test the default write behavior (full precision) and read it
        # back using native astropy objects; this ensures it round-trips
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].format == 'fits'
# Default time scale according to the FITS standard is UTC
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].format == 'mjd'
assert tm.meta['MJD-OBS'].scale == 'utc'
# Explicitly specified Time Scale
t.meta['TIMESYS'] = 'ET'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]
# Test for conversion of time data to its value, as defined by its format
t['a'].info.serialize_method['fits'] = 'formatted_value'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits')
# Test DATE
assert not isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'] == t.meta['DATE']
# Test MJD-xxx
assert not isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']
assert (tm['a'] == t['a'].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_loc_unit(self, table_types):
"""
Test that ``location`` specified by using any valid unit
(length/angle) in ``Time`` columns gets stored in FITS
as ITRS Cartesian coordinates (X, Y, Z), each in m.
Test that it round-trips through FITS.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(1, 2, 3, unit='km'))
table, hdr = time_to_fits(t)
# Check the header
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Check the round-trip of location
assert (tm['a'].location == t['a'].location).all()
assert tm['a'].location.x.value == t['a'].location.x.to_value(unit='m')
assert tm['a'].location.y.value == t['a'].location.y.to_value(unit='m')
assert tm['a'].location.z.value == t['a'].location.z.to_value(unit='m')
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_fits_to_time_index(self, table_types):
"""
Ensure that fits_to_time works correctly if the time column is also
an index.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t['b'] = [2, 1]
t['c'] = [3, 4]
# Make it so that the time column is also an index
t.add_index('a')
t.add_index('b')
        # Test the default write behavior (full precision) and read it
        # back using native astropy objects; this ensures it round-trips
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert isinstance(tm['a'], Time)
# Ensure that indices on original table are preserved but round-trip
# table has no indices. (If indices are ever serialized the final two
# tests are expected to fail).
assert len(t.indices) == 2
assert len(tm.indices) == 0
for name in ('a', 'b'):
assert len(t[name].info.indices) == 1
assert len(tm[name].info.indices) == 0
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits(self, table_types):
"""
Test that FITS table with time columns (standard compliant)
can be read by io.fits as a table with Time columns.
This tests the following:
1. The special-case where a column has the name 'TIME' and a
time unit
2. Time from Epoch (Reference time) is appropriately converted.
3. Coordinate columns (corresponding to coordinate keywords in the header)
other than time, that is, spatial coordinates, are not mistaken
to be time.
"""
filename = self.data('chandra_time.fits')
with pytest.warns(AstropyUserWarning, match=r'Time column "time" reference '
r'position will be ignored'):
tm = table_types.read(filename, astropy_native=True)
# Test case 1
assert isinstance(tm['time'], Time)
assert tm['time'].scale == 'tt'
assert tm['time'].format == 'mjd'
non_native = table_types.read(filename)
# Test case 2
ref_time = Time(non_native.meta['MJDREF'], format='mjd',
scale=non_native.meta['TIMESYS'].lower())
delta_time = TimeDelta(non_native['time'])
assert (ref_time + delta_time == tm['time']).all()
# Test case 3
for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']:
assert not isinstance(tm[colname], Time)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_datetime(self, table_types):
"""
Test that ISO-8601 Datetime String Columns are read correctly.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TCG',
time_ref_pos='GEOCENTER', array=self.time)
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].scale == 'tcg'
assert tm['datetime'].format == 'fits'
assert (tm['datetime'] == self.time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location(self, table_types):
"""
Test that geocentric/geodetic observatory position is read
properly, as and when it is specified.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
# Observatory position in ITRS Cartesian coordinates (geocentric)
cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210),
('OBSGEO-Z', 4077985)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.x.value == -2446354
assert tm['datetime'].location.y.value == 4237210
assert tm['datetime'].location.z.value == 4077985
# Observatory position in geodetic coordinates
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.lon.value == 0
assert tm['datetime'].location.lat.value == 0
assert np.isclose(tm['datetime'].location.height.value, 0,
rtol=0, atol=1e-9)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_scale(self, table_types):
"""
Test handling of 'GPS' and 'LOCAL' time scales which are
recognized by the FITS standard but are not native to astropy.
"""
# GPS scale column
gps_time = np.array([630720013, 630720014])
c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS',
coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time)
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with pytest.warns(AstropyUserWarning, match='FITS recognized time scale value "GPS"') as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert isinstance(tm['gps_time'], Time)
assert tm['gps_time'].format == 'gps'
assert tm['gps_time'].scale == 'tai'
assert (tm['gps_time'].value == gps_time).all()
# LOCAL scale column
local_time = np.array([1, 2])
c = fits.Column(name='local_time', format='D', unit='d',
coord_type='LOCAL', coord_unit='d',
time_ref_pos='RELOCATABLE', array=local_time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['local_time'], Time)
assert tm['local_time'].format == 'mjd'
assert tm['local_time'].scale == 'local'
assert (tm['local_time'].value == local_time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location_warnings(self, table_types):
"""
Test warnings for time column reference position.
"""
# Time reference position "TOPOCENTER" without corresponding
# observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with pytest.warns(AstropyUserWarning, match='observatory position is '
'not properly specified') as w:
table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
# Warning for default value of time reference position "TOPOCENTER"
# not generated when there is no specified observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
table_types.read(self.temp('time.fits'), astropy_native=True)
|
c876233bb17bbce8388e16f5263d8d8d7da36f9a84bed671f49dfd6501f57ad9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from . import FitsTestCase
from astropy.io.fits.scripts import fitsinfo
from astropy import __version__ as version
class TestFitsinfo(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsinfo.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsinfo.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsinfo {version}'
assert e.value.code == 0
def test_onefile(self, capsys):
fitsinfo.main([self.data('arange.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
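        # Expect three lines: the 'Filename:' line, the column header line,
        # and one row for the single PRIMARY HDU.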
assert len(out) == 3
assert out[1].startswith(
'No. Name Ver Type Cards Dimensions Format')
assert out[2].startswith(
' 0 PRIMARY 1 PrimaryHDU 7 (11, 10, 7) int32')
def test_multiplefiles(self, capsys):
fitsinfo.main([self.data('arange.fits'),
self.data('ascii.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 8
assert out[1].startswith(
'No. Name Ver Type Cards Dimensions Format')
assert out[2].startswith(
' 0 PRIMARY 1 PrimaryHDU 7 (11, 10, 7) int32')
assert out[3] == ''
assert out[7].startswith(
' 1 1 TableHDU 20 5R x 2C [E10.4, I5]')
|
7bc7129b60de5cb13ad8997c2adcd9650eb03c8bca7112a1387dcb82510047e3 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import platform
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.io import fits
from . import FitsTestCase
class TestUintFunctions(FitsTestCase):
@classmethod
def setup_class(cls):
cls.utypes = ('u2', 'u4', 'u8')
cls.utype_map = {'u2': np.uint16, 'u4': np.uint32, 'u8': np.uint64}
cls.itype_map = {'u2': np.int16, 'u4': np.int32, 'u8': np.int64}
cls.format_map = {'u2': 'I', 'u4': 'J', 'u8': 'K'}
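        # FITS column formats 'I', 'J' and 'K' are 16-, 32- and 64-bit signed
        # integers; unsigned values are stored by applying a bzero/TZERO offset.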
# Test of 64-bit compressed image is disabled. cfitsio library doesn't
# like it
@pytest.mark.parametrize(('utype', 'compressed'),
[('u2', False), ('u4', False), ('u8', False), ('u2', True),
('u4', True)]) # ,('u8',True)])
def test_uint(self, utype, compressed):
bits = 8*int(utype[1])
if platform.architecture()[0] == '64bit' or bits != 64:
if compressed:
hdu = fits.CompImageHDU(np.array([-3, -2, -1, 0, 1, 2, 3], dtype=np.int64))
hdu_number = 1
else:
hdu = fits.PrimaryHDU(np.array([-3, -2, -1, 0, 1, 2, 3], dtype=np.int64))
hdu_number = 0
hdu.scale(f'int{bits:d}', '', bzero=2 ** (bits-1))
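            # Scaling with BZERO = 2**(bits-1) stores the data as signed
            # integers on disk; opening with uint=True undoes the offset, so
            # e.g. -3 is read back as 2**bits - 3.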
hdu.writeto(self.temp('tempfile.fits'), overwrite=True)
with fits.open(self.temp('tempfile.fits'), uint=True) as hdul:
assert hdul[hdu_number].data.dtype == self.utype_map[utype]
assert (hdul[hdu_number].data == np.array(
[(2 ** bits) - 3, (2 ** bits) - 2, (2 ** bits) - 1,
0, 1, 2, 3],
dtype=self.utype_map[utype])).all()
hdul.writeto(self.temp('tempfile1.fits'))
with fits.open(self.temp('tempfile1.fits'),
uint16=True) as hdul1:
d1 = hdul[hdu_number].data
d2 = hdul1[hdu_number].data
assert (d1 == d2).all()
if not compressed:
# TODO: Enable these lines if CompImageHDUs ever grow
# .section support
sec = hdul[hdu_number].section[:1]
assert sec.dtype.name == f'uint{bits}'
assert (sec == d1[:1]).all()
@pytest.mark.parametrize('utype', ('u2', 'u4', 'u8'))
def test_uint_columns(self, utype):
"""Test basic functionality of tables with columns containing
pseudo-unsigned integers. See
https://github.com/astropy/astropy/pull/906
"""
bits = 8*int(utype[1])
if platform.architecture()[0] == '64bit' or bits != 64:
bzero = self.utype_map[utype](2**(bits-1))
one = self.utype_map[utype](1)
u0 = np.arange(bits+1, dtype=self.utype_map[utype])
u = 2**u0 - one
if bits == 64:
u[63] = bzero - one
u[64] = u[63] + u[63] + one
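            # uu is the signed on-disk representation: subtract the bzero
            # offset and reinterpret the bits as the matching signed int type.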
uu = (u - bzero).view(self.itype_map[utype])
# Construct a table from explicit column
col = fits.Column(name=utype, array=u,
format=self.format_map[utype], bzero=bzero)
table = fits.BinTableHDU.from_columns([col])
assert (table.data[utype] == u).all()
# This used to be table.data.base, but now after adding a table to
# a BinTableHDU it gets stored as a view of the original table,
# even if the original was already a FITS_rec. So now we need
# table.data.base.base
assert (table.data.base.base[utype] == uu).all()
hdu0 = fits.PrimaryHDU()
hdulist = fits.HDUList([hdu0, table])
hdulist.writeto(self.temp('tempfile.fits'), overwrite=True)
# Test write of unsigned int
del hdulist
with fits.open(self.temp('tempfile.fits'), uint=True) as hdulist2:
hdudata = hdulist2[1].data
assert (hdudata[utype] == u).all()
assert (hdudata[utype].dtype == self.utype_map[utype])
assert (hdudata.base[utype] == uu).all()
# Construct recarray then write out that.
v = u.view(dtype=[(utype, self.utype_map[utype])])
fits.writeto(self.temp('tempfile2.fits'), v, overwrite=True)
with fits.open(self.temp('tempfile2.fits'), uint=True) as hdulist3:
hdudata3 = hdulist3[1].data
assert (hdudata3.base[utype] ==
table.data.base.base[utype]).all()
assert (hdudata3[utype] == table.data[utype]).all()
assert (hdudata3[utype] == u).all()
def test_uint_slice(self):
"""
Fix for https://github.com/astropy/astropy/issues/5490
if data is sliced first, make sure the data is still converted as uint
"""
# create_data:
dataref = np.arange(2**16, dtype=np.uint16)
tbhdu = fits.BinTableHDU.from_columns([
fits.Column(name='a', format='I',
array=np.arange(2**16, dtype=np.int16)),
fits.Column(name='b', format='I', bscale=1, bzero=2**15,
array=dataref)
])
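        # Column 'b' is stored as signed int16 with bzero = 2**15, so reading
        # it back should recover the original uint16 values in dataref.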
tbhdu.writeto(self.temp('test_scaled_slicing.fits'))
with fits.open(self.temp('test_scaled_slicing.fits')) as hdulist:
data = hdulist[1].data
assert_array_equal(data['b'], dataref)
sel = data['a'] >= 0
assert_array_equal(data[sel]['b'], dataref[sel])
assert data[sel]['b'].dtype == dataref[sel].dtype
with fits.open(self.temp('test_scaled_slicing.fits')) as hdulist:
data = hdulist[1].data
assert_array_equal(data[sel]['b'], dataref[sel])
assert data[sel]['b'].dtype == dataref[sel].dtype
|
62cace37328af2d333078c0dd3af949cd8167579bd4fa29d21fe25579a7d20d1 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from .test_table import comparerecords
from . import FitsTestCase
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching the data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
            # If there is more than one extension with the same EXTNAME value,
            # EXTVER can be used (as the second argument) to distinguish
            # between the extensions.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be modified
del n
            # If an existing file is opened with "append" mode, like the
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure, e.g. adding or
                # deleting HDU(s), expanding or reducing the header beyond the
                # existing number of blocks (2880 bytes per block), or changing
                # the data size, the HDUList is written to a temporary file,
                # the original file is deleted, and the temporary file is
                # renamed to the original file name and reopened in update
                # mode. To a user, these two kinds of write-back look the same,
                # unless the optional argument in flush or close is set to 1.
del u[2]
u.flush()
                # The write method in the HDUList class writes the current
                # HDUList, with all changes made up to now, to a new file. This
                # method works the same regardless of the mode the HDUList was
                # opened with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
        # make a defective HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\.") as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.") as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
assert len(w) == 3
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
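        # arange.fits contains np.arange(7 * 10 * 11).reshape(7, 10, 11), so
        # the section results below can be checked against explicit values.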
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3)]:
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
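        # scale.fits stores int16 data with BSCALE/BZERO; by default the data
        # is scaled to float32 on read, unless do_not_scale_image_data is set.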
with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype('>i2')
with fits.open(self.data('scale.fits')) as hdul:
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
with fits.open(self.temp('test_new.fits'), uint=True) as f:
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = f'uint{int_size}'
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = f'uint{int_size}.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
# To generate the proper input file we always want to scale the
# data before writing it...otherwise when we open it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
NaNs in the data array.
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
hdu.writeto(self.temp('test_new.fits'))
            # Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not of
            # integer type
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
with fits.open(self.temp('test_new.fits')) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
        # Creating data that uses BLANK is currently kludgy--a separate issue
        # TODO: Rewrite this test when scaling with BLANK is better supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp('test.fits')
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(fits.verify.VerifyWarning,
match=r"Invalid 'BLANK' keyword in header"):
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp('test2.fits')) as hdul2:
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
# This emits warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename,
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
        Replacing the original header of an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
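            # With physical = raw * BSCALE + BZERO, the physical value 0
            # written above maps back to a raw value of about -BZERO / BSCALE.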
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
with pytest.warns(fits.verify.VerifyWarning, match=r"Invalid value for "
r"'BLANK' keyword in header: 'nan'"):
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r'data object array\(1\) should have at least one dimension'
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif('not HAS_SCIPY')
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy.misc
np.random.seed(42)
data = scipy.misc.ascent() + np.random.randn(512, 512)*10
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5)\
.writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5)\
.writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
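# Illustrative note (a sketch, not part of the original test suite): with
# dither_seed=DITHER_SEED_CHECKSUM the ZDITHER0 seed is derived from a
# checksum of the data, reproduced above as
#     (array[0].view('uint8').sum() % 10000) + 1
# i.e. the byte sum of the first row, reduced modulo 10000, plus one.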
def test_disable_image_compression(self):
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
# wasn't compressed perfectly. This shouldn't happen ever, but just to
# make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
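# Illustrative note (a sketch, not part of the original test suite): the two
# halves of the test above show that GZIP_1 with the default quantization is
# lossy for floating-point data, while quantize_level=0.0 disables
# quantization so the array round-trips exactly, e.g.
#     fits.CompImageHDU(data=noise, compression_type='GZIP_1',
#                       quantize_level=0.0)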
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
while (len(header) < 1000):
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
# First try keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data('double_ext.fits')) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices['EXTNAME']
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == 'ccd00'
assert 'EXTNAME' in hdu.header
assert hdu.name == hdu.header['EXTNAME']
# There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices['EXTNAME']
assert len(indices) == 1
# Test header sync from property set.
new_name = 'NEW_NAME'
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header['EXTNAME'] == new_name
assert hdu._header['EXTNAME'] == new_name
assert hdu._image_header['EXTNAME'] == new_name
# Check that setting the header will change the name property.
hdu.header['EXTNAME'] = 'NEW2'
assert hdu.name == 'NEW2'
hdul.writeto(self.temp('tmp.fits'), overwrite=True)
with fits.open(self.temp('tmp.fits')) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices['EXTNAME']) == 1
assert hdu1.name == 'NEW2'
# Check that deleting EXTNAME and then setting the name works
# properly.
del hdu.header['EXTNAME']
hdu.name = 'RE-ADDED'
assert hdu.name == 'RE-ADDED'
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = 'FOO'
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
name = 'BAR'
hdu.name = name
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices['EXTNAME']) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({'HELLO': 'world'})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header['HELLO'] == 'world'
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE or BZERO is set to a floating-point value, the uncompressed
image data should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
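# Illustrative note (a sketch, not part of the original test suite): the
# parametrization above encodes the expected dtype promotion when BSCALE or
# BZERO holds a floating-point value on a compressed integer image:
#     uint8 -> float32, int16 -> float32, int32 -> float64
# i.e. 8- and 16-bit integers promote to float32 and 32-bit integers to
# float64.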
@pytest.mark.parametrize('dtype', (np.uint8, np.int16, np.uint16, np.int32,
np.uint32))
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid-50, mid+50, dtype=dtype)
testfile = self.temp('test.fits')
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_write_non_contiguous_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
"""
orig = np.arange(100, dtype=float).reshape((10, 10), order='f')
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp('test.fits'))
actual = fits.getdata(self.temp('test.fits'))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comphdu_bscale(tmpdir):
"""
Regression test for a bug where extensions that used BZERO and BSCALE and
were converted to CompImageHDU ended up with BZERO/BSCALE placed before
TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But when the keyword was stored as 32768.0, it
# was possible to trigger the implicit casting error.
filename = get_pkg_data_filename('data/compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
'''Test for int8 support, https://github.com/astropy/astropy/issues/11995'''
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header['BITPIX'] == 8
assert hdul[0].header['BZERO'] == -128
assert hdul[0].header['BSCALE'] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
|
4517edfaedfcadbc40d502fa16a05db33d6a7db36d3f73bbf0e278b994502187 | import numpy as np
import pytest
from astropy.io import ascii
from astropy.io.ascii.qdp import _read_table_qdp, _write_table_qdp
from astropy.io.ascii.qdp import _get_lines_from_file
from astropy.table import Table, Column, MaskedColumn
from astropy.utils.exceptions import AstropyUserWarning
def test_get_tables_from_qdp_file(tmpdir):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.212439 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 -nan
"""
path = str(tmpdir.join('test.qdp'))
with open(path, "w") as fp:
print(example_qdp, file=fp)
table0 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=0)
assert table0.meta["initial_comments"][0].startswith("Swift")
assert table0.meta["comments"][0].startswith("WT -- hard data")
table2 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=2)
assert table2.meta["initial_comments"][0].startswith("Swift")
assert table2.meta["comments"][0].startswith("WT -- hardness")
assert np.isclose(table2["MJD_nerr"][0], -2.37847222222222e-05)
def test_roundtrip(tmpdir):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 NO 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
! Add command, just to raise the warning.
READ TERR 1
! WT -- whatever
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
NO 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
"""
path = str(tmpdir.join('test.qdp'))
path2 = str(tmpdir.join('test2.qdp'))
with open(path, "w") as fp:
print(example_qdp, file=fp)
with pytest.warns(AstropyUserWarning) as record:
table = _read_table_qdp(path, names=["MJD", "Rate"],
table_id=0)
assert np.any(["This file contains multiple command blocks"
in r.message.args[0]
for r in record])
_write_table_qdp(table, path2)
new_table = _read_table_qdp(path2, names=["MJD", "Rate"], table_id=0)
for col in new_table.colnames:
is_masked = np.array([np.ma.is_masked(val) for val in new_table[col]])
if np.any(is_masked):
# All NaN values are read as such.
assert np.ma.is_masked(table[col][is_masked])
is_nan = np.array([(not np.ma.is_masked(val) and np.isnan(val))
for val in new_table[col]])
# All non-NaN values are the same
assert np.allclose(new_table[col][~is_nan], table[col][~is_nan])
if np.any(is_nan):
# All NaN values are read as such.
assert np.isnan(table[col][is_nan])
assert np.allclose(new_table['MJD_perr'], [2.378472e-05, 1.1446759e-05])
for meta_name in ['initial_comments', 'comments']:
assert meta_name in new_table.meta
def test_read_example(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
dat = ascii.read(example_qdp, format='qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
assert np.allclose(t['a'], [54000, 55000])
assert t['c_err'][0] == 5.5
assert np.ma.is_masked(t['b'][0])
assert np.isnan(t['d'][1])
for col1, col2 in zip(t.itercols(), dat.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
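# Illustrative sketch (not part of the original tests): the same read pattern
# used above, shown in isolation.  ``names`` lists only the data columns;
# error columns such as ``a_perr``/``a_nerr`` (from "READ TERR 1") and
# ``c_err`` (from "READ SERR 3") are added automatically, as the assertions
# above demonstrate.
def _example_read_qdp(qdp_text, names):
    # Read the first table of a QDP string or file through the unified interface.
    return Table.read(qdp_text, format='ascii.qdp', table_id=0, names=names)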
def test_roundtrip_example(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
test_file = str(tmpdir.join('test.qdp'))
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'])
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example_comma(tmpdir):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a,a(pos),a(neg),b,c,ce,d
53000.5,0.25,-0.5,1,1.5,3.5,2
54000.5,1.25,-1.5,2,2.5,4.5,3
NO,NO,NO,NO,NO
! Table 1 comment
!a,a(pos),a(neg),b,c,ce,d
54000.5,2.25,-2.5,NO,3.5,5.5,5
55000.5,3.25,-3.5,4,4.5,6.5,nan
"""
test_file = str(tmpdir.join('test.qdp'))
t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
names=['a', 'b', 'c', 'd'], sep=',')
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)
# t.values_equal(t2)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3, 4]))
t1.add_column(MaskedColumn(data=[4., np.nan, 3., 1.], name='b',
mask=[False, False, False, True]))
t1.write(test_file, format='ascii.qdp')
with pytest.warns(UserWarning) as record:
t2 = Table.read(test_file, format='ascii.qdp')
assert np.any(["table_id not specified. Reading the first available table"
in r.message.args[0]
for r in record])
assert np.allclose(t2['col1'], t1['a'])
assert np.all(t2['col1'] == t1['a'])
good = ~np.isnan(t1['b'])
assert np.allclose(t2['col2'][good], t1['b'][good])
def test_read_write_simple_specify_name(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
# Give a non-None err_specs
t1.write(test_file, format='ascii.qdp')
t2 = Table.read(test_file, table_id=0, format='ascii.qdp', names=['a'])
assert np.all(t2['a'] == t1['a'])
def test_get_lines_from_qdp(tmpdir):
test_file = str(tmpdir.join('test.qdp'))
text_string = "A\nB"
text_output = _get_lines_from_file(text_string)
with open(test_file, "w") as fobj:
print(text_string, file=fobj)
file_output = _get_lines_from_file(test_file)
list_output = _get_lines_from_file(["A", "B"])
for i, line in enumerate(["A", "B"]):
assert file_output[i] == line
assert list_output[i] == line
assert text_output[i] == line
|
dbe23994e9504746d39e5f3531139c8e8e38a5b6db3e2791260065775747d119 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some methods related to ``CDS`` format
reader/writer.
Requires `pyyaml <https://pyyaml.org/>`_ to be installed.
"""
import numpy as np
import pytest
from io import StringIO
from astropy.io import ascii
from astropy import units as u
from astropy.table import Table
from astropy.table import Column, MaskedColumn
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal
test_dat = ['names e d s i',
'HD81809 1E-7 22.25608 +2 67',
'HD103095 -31.6e5 +27.2500 -9E34 -30']
def test_roundtrip_mrt_table():
"""
Tests whether or not the CDS writer can roundtrip a table,
i.e. read a table into a ``Table`` object and write it back
to a file exactly as it was. Since the CDS writer presently uses
an MRT format template, only the Byte-By-Byte description and the
data section of the table can be compared between the original
and the newly written table.
Further, the CDS Reader does not have the capability to recognize
column formats from the header of a CDS/MRT table, so this test
only works for a limited set of simple tables which have neither
whitespace in the column values nor mix-in columns. Because of
this, the written table output cannot be directly matched with
the original file and has to be checked against a list of lines.
Masked columns are read properly though, and are thus tested
during round-tripping.
The differences between the ``cdsFunctional2.dat`` file and
``exp_output`` are the following:
* Metadata is different because MRT template is used for writing.
* Spacing between ``Label`` and ``Explanations`` column in the
Byte-By-Byte.
* Units are written as ``[cm.s-2]`` and not ``[cm/s2]``, since both
are valid according to CDS/MRT standard.
"""
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 7 A7 --- ID Star ID ',
' 9-12 I4 K Teff [4337/4654] Effective temperature ',
'14-17 F4.2 [cm.s-2] logg [0.77/1.28] Surface gravity ',
'19-22 F4.2 km.s-1 vturb [1.23/1.82] Micro-turbulence velocity',
'24-28 F5.2 [-] [Fe/H] [-2.11/-1.5] Metallicity ',
'30-33 F4.2 [-] e_[Fe/H] ? rms uncertainty on [Fe/H] ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'S05-5 4337 0.77 1.80 -2.07 ',
'S08-229 4625 1.23 1.23 -1.50 ',
'S05-10 4342 0.91 1.82 -2.11 0.14',
'S05-47 4654 1.28 1.74 -1.64 0.16']
dat = get_pkg_data_filename('data/cdsFunctional2.dat',
package='astropy.io.ascii.tests')
t = Table.read(dat, format='ascii.mrt')
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
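# Illustrative sketch (not part of the original tests): the write/read
# round-trip pattern exercised throughout this module, shown in isolation
# for a simple Table ``t`` without mix-in columns.
def _example_mrt_roundtrip(t):
    out = StringIO()
    t.write(out, format='ascii.mrt')                 # write in MRT format
    return ascii.read(out.getvalue(), format='cds')  # read back with the CDS reader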
def test_write_byte_by_byte_units():
t = ascii.read(test_dat)
col_units = [None, u.C, u.kg, u.m / u.s, u.year]
t._set_column_attribute('unit', col_units)
# Add a column with magnitude units.
# Note that magnitude has to be assigned for each value explicitly.
t['magnitude'] = [u.Magnitude(25), u.Magnitude(-9)]
col_units.append(u.mag)
out = StringIO()
t.write(out, format='ascii.mrt')
# Read written table.
tRead = ascii.read(out.getvalue(), format='cds')
assert [tRead[col].unit for col in tRead.columns] == col_units
def test_write_readme_with_default_options():
exp_output = [
'Title:',
'Authors:',
'Table:',
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67',
'HD103095 -3e+06 27.25000 -9e+34 -30']
t = ascii.read(test_dat)
out = StringIO()
t.write(out, format='ascii.mrt')
assert out.getvalue().splitlines() == exp_output
def test_write_empty_table():
out = StringIO()
with pytest.raises(NotImplementedError):
Table().write(out, format='ascii.mrt')
def test_write_null_data_values():
exp_output = ['HD81809 1e-07 22.25608 2.0e+00 67',
'HD103095 -3e+06 27.25000 -9.0e+34 -30',
'Sun 5.3e+27 ']
t = ascii.read(test_dat)
t.add_row(['Sun', '3.25', '0', '5.3e27', '2'],
mask=[False, True, True, False, True])
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines)
if s.startswith(('------', '======='))]
lines = lines[i_secs[-1] + 1:] # Last section is the data.
assert lines == exp_output
def test_write_byte_by_byte_for_masked_column():
"""
This test differs from the ``test_write_null_data_values``
above in that it tests the column value limits in the Byte-By-Byte
description section for columns whose values are masked.
It also checks the description for columns with same values.
"""
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [0.0/0.01]? Description of e ',
'16-17 F2.0 --- d ? Description of d ',
'19-25 E7.1 --- s [-9e+34/2.0] Description of s ',
'27-29 I3 --- i [-30/67] Description of i ',
'31-33 F3.1 --- sameF [5.0/5.0] Description of sameF',
'35-36 I2 --- sameI [20] Description of sameI ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 2e+00 67 5.0 20',
'HD103095 -9e+34 -30 5.0 20']
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name='sameF')
t.add_column([20, 20], name='sameI')
t['e'] = MaskedColumn(t['e'], mask=[False, True])
t['d'] = MaskedColumn(t['d'], mask=[True, True])
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
exp_coord_cols_output = dict(generic=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 22 02 15.4500000000 -61 39 34.599996000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000'],
positive_de=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000'],
galactic=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-59 F16.12 deg GLON Galactic Longitude ',
'61-76 F16.12 deg GLAT Galactic Latitude ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 330.071639591690 -45.548080484609',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 330.071639591690 -45.548080484609'],
ecliptic=[
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e ',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-59 F16.12 deg ELON Ecliptic Longitude (geocentrictrueecliptic)',
'61-76 F16.12 deg ELAT Ecliptic Latitude (geocentrictrueecliptic) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 306.224208650096 -45.621789850825',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 306.224208650096 -45.621789850825'],
)
def test_write_coord_cols():
"""
There can only be one such coordinate column in a single table,
because division of columns into individual component columns requires
iterating over the table columns, which will have to be done again
if additional such coordinate columns are present.
"""
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name='sameF')
t.add_column([20, 20], name='sameI')
# Coordinates of ASASSN-15lh
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
# Coordinates of ASASSN-14li
coordp = SkyCoord(192.06343503, 17.77402684, unit=u.deg)
cols = [Column([coord, coordp]), # Generic coordinate column
coordp, # Coordinate column with positive DEC
coord.galactic, # Galactic coordinates
coord.geocentrictrueecliptic # Ecliptic coordinates
]
# Loop through different types of coordinate columns.
for col, coord_type in zip(cols, exp_coord_cols_output):
exp_output = exp_coord_cols_output[coord_type]
t['coord'] = col
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
# Check if the original table columns remains unmodified.
assert t.colnames == ['names', 'e', 'd', 's', 'i', 'sameF', 'sameI', 'coord']
def test_write_byte_by_byte_bytes_col_format():
"""
Tests the alignment of Byte counts with respect to the hyphen
in the Bytes column of the Byte-By-Byte section. The whitespace around
the hyphen is governed by the number of digits in the total Byte
count. Single Byte columns should have a single Byte count
without the hyphen.
"""
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-21 E12.6 --- e [-3160000.0/0.01] Description of e',
'23-30 F8.5 --- d [22.25/27.25] Description of d ',
'32-38 E7.1 --- s [-9e+34/2.0] Description of s ',
'40-42 I3 --- i [-30/67] Description of i ',
'44-46 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'48-49 I2 --- sameI [20] Description of sameI ',
' 51 I1 --- singleByteCol [2] Description of singleByteCol ',
'53-54 I2 h RAh Right Ascension (hour) ',
'56-57 I2 min RAm Right Ascension (minute) ',
'59-71 F13.10 s RAs Right Ascension (second) ',
' 73 A1 --- DE- Sign of Declination ',
'74-75 I2 deg DEd Declination (degree) ',
'77-78 I2 arcmin DEm Declination (arcmin) ',
'80-91 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------']
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name='sameF')
t.add_column([20, 20], name='sameI')
t['coord'] = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t['singleByteCol'] = [2, 2]
t['e'].format = '.5E'
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines)
if s.startswith(('------', '======='))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0]:i_secs[-2]]
lines.append('-' * 80) # Append a separator line.
assert lines == exp_output
def test_write_byte_by_byte_wrapping():
"""
Test line wrapping in the description column of the
Byte-By-Byte section of the ReadMe.
"""
exp_output = '''\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- thisIsALongColumnLabel This is a tediously long
description. But they do sometimes
have them. Better to put extra
details in the notes. This is a
tediously long description. But they
do sometimes have them. Better to put
extra details in the notes.
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
--------------------------------------------------------------------------------
''' # noqa: W291
t = ascii.read(test_dat)
t.remove_columns(['s', 'i'])
description = 'This is a tediously long description.' \
+ ' But they do sometimes have them.' \
+ ' Better to put extra details in the notes. '
t['names'].description = description * 2
t['names'].name = 'thisIsALongColumnLabel'
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines)
if s.startswith(('------', '======='))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0]:i_secs[-2]]
lines.append('-' * 80) # Append a separator line.
assert lines == exp_output.splitlines()
def test_write_mixin_and_broken_cols():
"""
Tests convertion to string values for ``mix-in`` columns other than
``SkyCoord`` and for columns with only partial ``SkyCoord`` values.
"""
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat', # noqa
'--------------------------------------------------------------------------------', # noqa
' Bytes Format Units Label Explanations', # noqa
'--------------------------------------------------------------------------------', # noqa
' 1- 7 A7 --- name Description of name ', # noqa
' 9- 74 A66 --- Unknown Description of Unknown', # noqa
' 76-114 A39 --- Unknown Description of Unknown', # noqa
'116-138 A23 --- Unknown Description of Unknown', # noqa
'--------------------------------------------------------------------------------', # noqa
'Notes:', # noqa
'--------------------------------------------------------------------------------', # noqa
'HD81809 <SkyCoord (ICRS): (ra, dec) in deg', # noqa
' (330.564375, -61.65961111)> (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000', # noqa
'random 12 (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000'] # noqa
t = Table()
t['name'] = ['HD81809']
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t['coord'] = Column(coord)
t.add_row(['random', 12])
t['cart'] = coord.cartesian
t['time'] = Time('2019-1-1')
out = StringIO()
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
def test_write_extra_skycoord_cols():
"""
Tests output for cases when table contains multiple ``SkyCoord`` columns.
"""
exp_output = '''\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 7 A7 --- name Description of name
9-10 I2 h RAh Right Ascension (hour)
12-13 I2 min RAm Right Ascension (minute)
15-27 F13.10 s RAs Right Ascension (second)
29 A1 --- DE- Sign of Declination
30-31 I2 deg DEd Declination (degree)
33-34 I2 arcmin DEm Declination (arcmin)
36-47 F12.9 arcsec DEs Declination (arcsec)
49-62 A14 --- coord2 Description of coord2
--------------------------------------------------------------------------------
Notes:
--------------------------------------------------------------------------------
HD4760 0 49 39.9000000000 +06 24 07.999200000 12.4163 6.407
HD81809 22 02 15.4500000000 -61 39 34.599996000 330.564 -61.66
''' # noqa: W291
t = Table()
t['name'] = ['HD4760', 'HD81809']
t['coord1'] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
t['coord2'] = SkyCoord([12.41630, 330.564400], [6.407, -61.66], unit=u.deg)
out = StringIO()
with pytest.warns(UserWarning, match=r'column 2 is being skipped with designation of a '
r'string valued column `coord2`'):
t.write(out, format='ascii.mrt')
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines[:-2] == exp_output.splitlines()[:-2]
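    # The seconds fields in the last two data rows are floating point values
    # whose trailing digits may vary slightly (e.g. between platforms), so
    # compare the fixed leading and sign/degree substrings exactly and the
    # numeric fields only approximately via ``np.fromstring``.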
for a, b in zip(lines[-2:], exp_output.splitlines()[-2:]):
assert a[:18] == b[:18]
assert a[30:42] == b[30:42]
assert_almost_equal(np.fromstring(a[2:], sep=' '), np.fromstring(b[2:], sep=' '))
def test_write_skycoord_with_format():
"""
    Tests output with a custom format for the ``SkyCoord`` seconds columns
    (``RAs`` and ``DEs``).
"""
exp_output = '''\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 7 A7 --- name Description of name
9-10 I2 h RAh Right Ascension (hour)
12-13 I2 min RAm Right Ascension (minute)
15-19 F5.2 s RAs Right Ascension (second)
21 A1 --- DE- Sign of Declination
22-23 I2 deg DEd Declination (degree)
25-26 I2 arcmin DEm Declination (arcmin)
28-31 F4.1 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
Notes:
--------------------------------------------------------------------------------
HD4760 0 49 39.90 +06 24 08.0
HD81809 22 02 15.45 -61 39 34.6
''' # noqa: W291
t = Table()
t['name'] = ['HD4760', 'HD81809']
t['coord'] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
out = StringIO()
    # This will raise a warning because `formats` is checked before the writer
    # creates the final list of columns.
with pytest.warns(AstropyWarning, match=r"The key.s. {'[RD][AE]s', '[RD][AE]s'} specified in "
r"the formats argument do not match a column name."):
t.write(out, format='ascii.mrt', formats={'RAs': '05.2f', 'DEs': '04.1f'})
lines = out.getvalue().splitlines()
i_bbb = lines.index('=' * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines == exp_output.splitlines()
bd88dccda7fe2df3cc56eeef4ba6f449c445b44e34179f70c2afdac86c71912d
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import functools
from contextlib import nullcontext
from io import BytesIO
import re
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from astropy.table import Table, MaskedColumn
from astropy.io import ascii
from astropy.io.ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from astropy.io.ascii.fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_equal, assert_almost_equal, assert_true
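# Note: this module-level ``StringIO`` deliberately shadows ``io.StringIO`` and
# returns a ``BytesIO`` so that the readers are exercised on byte streams.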
StringIO = lambda x: BytesIO(x.encode('ascii')) # noqa
CI = os.environ.get('CI', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
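    """
    Parse ``table`` through several equivalent paths (a ``Reader`` instance on
    a string, a file-like object, and a list of lines, plus ``ascii.read``
    with the fast and pure-Python readers) and check that all results agree.
    Optionally also exercise the parallel fast reader and reading back from a
    file written to ``tmpdir``.  Return the first parsed table.
    """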
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join(f'table{_filename_counter}.txt'))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
@pytest.mark.parametrize('delimiter', [',', '\t', ' ', 'csv'])
@pytest.mark.parametrize('quotechar', ['"', "'"])
@pytest.mark.parametrize('fast', [False, True])
def test_embedded_newlines(delimiter, quotechar, fast):
"""Test that embedded newlines are supported for io.ascii readers
    and writers, with both the fast and pure-Python implementations."""
# Start with an assortment of values with different embedded newlines and whitespace
dat = [['\t a ', ' b \n cd ', '\n'],
[' 1\n ', '2 \n" \t 3\n4\n5', "1\n '2\n"],
[' x,y \nz\t', '\t 12\n\t34\t ', '56\t\n'],
]
dat = Table(dat, names=('a', 'b', 'c'))
# Construct a table which is our expected result of writing the table and
# reading it back. Certain stripping of whitespace is expected.
exp = {} # expected output from reading
for col in dat.itercols():
vals = []
for val in col:
# Readers and writers both strip whitespace from ends of values
val = val.strip(' \t')
if not fast:
# Pure Python reader has a "feature" where it strips trailing
# whitespace from each input line. This means a value like
# " x \ny \t\n" gets read as "x\ny".
bits = val.splitlines(keepends=True)
bits_out = []
for bit in bits:
bit = re.sub(r'[ \t]+(\n?)$', r'\1', bit.strip(' \t'))
bits_out.append(bit)
val = ''.join(bits_out)
vals.append(val)
exp[col.info.name] = vals
exp = Table(exp)
if delimiter == 'csv':
format = 'csv'
delimiter = ','
else:
format = 'basic'
# Write the table to `text`
fh = io.StringIO()
ascii.write(dat, fh, format=format, delimiter=delimiter,
quotechar=quotechar, fast_writer=fast)
text = fh.getvalue()
# Read it back and compare to the expected
dat_out = ascii.read(text, format=format, guess=False, delimiter=delimiter,
quotechar=quotechar, fast_reader=fast)
eq = dat_out.values_equal(exp)
assert all(np.all(col) for col in eq.itercols())
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']],
names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent("""
COL1 COL2 COL3
1 A -1
2 B -2
""")
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent("""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
""")
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
field was incorrect.
"""
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
@pytest.mark.filterwarnings("ignore:OverflowError converting to IntType in column TIMESTAMP")
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent("""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
""") # noqa
ascii.read(tbl, format='csv', fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent("""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
""")
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'c\nd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', dict((i + 1, ascii.convert_numpy(np.uint))
for i in range(3))), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent("""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
""")
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert 'Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e.value)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(get_pkg_data_filename('data/conf_py.txt'),
fast_reader=True, guess=True)
assert 'Unable to guess table format with the guesses listed below' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
# The empty value in row A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',',
fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'],
fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d\n e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
    begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0,
parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1,
parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
    assert table['c'][0] == '\n'  # surrounding spaces are stripped but the embedded newline survives
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
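    # In the RDB text below the first data row has empty 'a' and 'c' fields,
    # which the reader is expected to return masked.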
expected = Table([MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=('a', 'b', 'c'))
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" # noqa
head = [f'A{i}' for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3Gb) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running
the test requires quite a lot of memory (~18Gb when reading the file) !!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename, 'r') as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table2(tmpdir):
"""Test reading of a file with a huge column.
"""
# (2**32 // 2) : max value for int
# // 10 : we use a value for rows that have 10 chars (1e9)
# + 5 : add a few lines so the length cannot be stored by an int
NB_ROWS = 2**32 // 2 // 10 + 5
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
t = Table(data=[data], names=['a'], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename, 'r') as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
if not parallel and not fast_reader:
ctx = nullcontext()
else:
ctx = pytest.warns()
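    # The fast reader emits OverflowError precision warnings for the values
    # below, so wrap the reads in ``pytest.warns()`` whenever it is used; the
    # warning messages themselves are only checked in serial mode
    # (``test_for_warnings``).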
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
# NOTE: Warning behavior varies for the parameters being passed in.
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+2}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+4}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
    Test parsing of fixed-format float64 numbers near the range limits
    (|~4.94e-324 to 1.7977e+308|). Within the limit for full precision
    (|~2.5e-307| for the strtod C parser, a factor of 10 better for the fast
    converter) exact numbers shall be returned; beyond that an Overflow
    warning is raised. Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(StringIO(99 * '0' + '.' + D * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**-(D + 1), rtol=rtol, atol=1.e-324)
for D in 99, 202, 308:
t = ascii.read(StringIO('1' + D * '0' + '.0'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**D, rtol=rtol, atol=1.e-324)
# 0.0 is always exact (no Overflow warning)!
for s in '0.0', '0.0e+0', 399 * '0' + '.' + 365 * '0':
t = ascii.read(StringIO(s), format='no_header',
guess=guess, fast_reader=fast_reader)
assert t['col1'][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns() as warning_lines:
t = ascii.read(StringIO('0.' + 314 * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f'Expected 0 or 1 warning, found {n_warns}'
if n_warns == 1:
assert 'OverflowError converting to FloatType in column col1, possibly resulting in degraded precision' in str(warning_lines[0].message) # noqa
assert_almost_equal(t['col1'][0], 1.e-315, rtol=1.e-10, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f'{imax+2:d}'
text = f'P M S\n {imax:d} {imin:d} {huge:s}'
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f'P M S\n000{imax:d} -0{-imin:d} 00{huge:s}'
expected = Table([[imax], [imin], ['00' + huge]], names=('P', 'M', 'S'))
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f'A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7'
expected = Table([[12.3, 10. * imax], [f'{imax:d}0', '45.6e7']],
names=('A', 'B'))
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
expstyles = {'e': 6 * ('E'),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3 * ('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
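    # Each tuple above lists the exponent character substituted into the six
    # format slots of ``text``; the '0' entries in the 'Fortran' case produce
    # numbers whose exponent is written with only a sign and no letter, which
    # should still be recognised when the exponent style is auto-detected.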
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6 * ('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e.value)
# Enable multiprocessing and the fast converter iterate over
# all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
# In the special case of fast_converter=True (the default),
# incompatibility is ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
@pytest.mark.parametrize('fast_reader', [False,
dict(parallel=True),
dict(parallel=False)])
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta['comments'] == ['comment 1', 'comment 2']
assert len(t) == 0
assert t.colnames == ['col1', 'col2']
@pytest.mark.parametrize('fast_reader', [dict(use_fast_converter=True),
dict(exponent_style='A')])
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize('delimiter', ['\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == '\r':
eol = '\n'
else:
eol = '\r'
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c",
f"1{delimiter} '2' {delimiter} 3.0"]
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ['a', 'b', 'c']
assert len(t1) == len(t2) == 1
assert t1['b'].dtype.kind in ('S', 'U')
assert t2['b'].dtype.kind in ('S', 'U')
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format('|', eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1['b'].dtype.kind, 'i')
@pytest.mark.parametrize('delimiter', [' ', '|', '\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_single_line_string(delimiter, fast_reader):
"""
String input without a newline character is interpreted as a file name,
unless it is an element of an iterable. Maybe not logical, but test that
it is at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=('col1', 'col2', 'col3'))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ('\r', '\n'):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t1, expected)
else:
# On Windows an invalid file name (e.g. one containing '|') raises a generic OSError rather than FileNotFoundError.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read([text], format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t2, expected)
|
ec4677bca1fc0be35ac533e8507359391dd20943cf3bff4df8a9242b154b129c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import numpy as np
import pytest
from astropy.io import ascii
from astropy.io.ascii.core import InconsistentTableError
from .common import (assert_equal, assert_almost_equal)
def assert_equal_splitlines(arg1, arg2):
assert_equal(arg1.splitlines(), arg2.splitlines())
def test_read_normal():
"""Nice, typical fixed format table"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(Reader=ascii.FixedWidth)
dat = reader.read(table)
assert_equal(dat.colnames, ['Col1', 'Col2'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_normal_names():
"""Nice, typical fixed format table with col names provided"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(Reader=ascii.FixedWidth,
names=('name1', 'name2'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name2'])
assert_almost_equal(dat[1][0], 2.4)
def test_read_normal_names_include():
"""Nice, typical fixed format table with col names provided"""
table = """
# comment (with blank line above)
| Col1 | Col2 | Col3 |
| 1.2 | "hello" | 3 |
| 2.4 |'s worlds| 7 |
"""
reader = ascii.get_reader(Reader=ascii.FixedWidth,
names=('name1', 'name2', 'name3'),
include_names=('name1', 'name3'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name3'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], 3)
def test_read_normal_exclude():
"""Nice, typical fixed format table with col name excluded"""
table = """
# comment (with blank line above)
| Col1 | Col2 |
| 1.2 | "hello" |
| 2.4 |'s worlds|
"""
reader = ascii.get_reader(Reader=ascii.FixedWidth,
exclude_names=('Col1',))
dat = reader.read(table)
assert_equal(dat.colnames, ['Col2'])
assert_equal(dat[1][0], "'s worlds")
def test_read_weird():
"""Weird input table with data values chopped by col extent """
table = """
Col1 | Col2 |
1.2 "hello"
2.4 sdf's worlds
"""
reader = ascii.get_reader(Reader=ascii.FixedWidth)
dat = reader.read(table)
assert_equal(dat.colnames, ['Col1', 'Col2'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hel')
assert_equal(dat[1][1], "df's wo")
def test_read_double():
"""Table with double delimiters"""
table = """
|| Name || Phone || TCP||
| John | 555-1234 |192.168.1.10X|
| Mary | 555-2134 |192.168.1.12X|
| Bob | 555-4527 | 192.168.1.9X|
"""
dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False)
assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_space_delimiter():
"""Table with space delimiter"""
table = """
Name --Phone- ----TCP-----
John 555-1234 192.168.1.10
Mary 555-2134 192.168.1.12
Bob 555-4527 192.168.1.9
"""
dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False,
delimiter=' ')
assert_equal(tuple(dat.dtype.names), ('Name', '--Phone-', '----TCP-----'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_no_header_autocolumn():
"""Table with no header row and auto-column naming"""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False,
header_start=None, data_start=0)
assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_no_header_names():
"""Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |."""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False,
header_start=None, data_start=0,
names=('Name', 'Phone', 'TCP'))
assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_no_header_autocolumn_NoHeader():
"""Table with no header row and auto-column naming"""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader)
assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_no_header_names_NoHeader():
"""Table with no header row and with col names provided. Second
and third rows also have hanging spaces after final |."""
table = """
| John | 555-1234 |192.168.1.10|
| Mary | 555-2134 |192.168.1.12|
| Bob | 555-4527 | 192.168.1.9|
"""
dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader,
names=('Name', 'Phone', 'TCP'))
assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP'))
assert_equal(dat[1][0], "Mary")
assert_equal(dat[0][1], "555-1234")
assert_equal(dat[2][2], "192.168.1.9")
def test_read_col_starts():
"""Table with no delimiter with column start and end values specified."""
table = """
# 5 9 17 18 28
# | | || |
John 555- 1234 192.168.1.10
Mary 555- 2134 192.168.1.12
Bob 555- 4527 192.168.1.9
"""
dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader,
names=('Name', 'Phone', 'TCP'),
col_starts=(0, 9, 18),
col_ends=(5, 17, 28),
)
assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP'))
assert_equal(dat[0][1], "555- 1234")
assert_equal(dat[1][0], "Mary")
assert_equal(dat[1][2], "192.168.1.")
assert_equal(dat[2][2], "192.168.1") # col_end=28 cuts this column off
def test_read_detect_col_starts_or_ends():
"""Table with no delimiter with only column start or end values specified"""
table = """
#1 9 19 <== Column start indexes
#| | | <== Column start positions
#<------><--------><-------------> <== Inferred column positions
John 555- 1234 192.168.1.10
Mary 555- 2134 192.168.1.123
Bob 555- 4527 192.168.1.9
Bill 555-9875 192.255.255.255
"""
for kwargs in ({'col_starts': (1, 9, 19)},
{'col_ends': (8, 18, 33)}):
dat = ascii.read(table,
Reader=ascii.FixedWidthNoHeader,
names=('Name', 'Phone', 'TCP'),
**kwargs)
assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP'))
assert_equal(dat[0][1], "555- 1234")
assert_equal(dat[1][0], "Mary")
assert_equal(dat[1][2], "192.168.1.123")
assert_equal(dat[3][2], "192.255.255.255")
table = """\
| Col1 | Col2 | Col3 | Col4 |
| 1.2 | "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
"""
dat = ascii.read(table, Reader=ascii.FixedWidth)
def test_write_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth)
assert_equal_splitlines(out.getvalue(), """\
| Col1 | Col2 | Col3 | Col4 |
| 1.2 | "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
""")
def test_write_fill_values():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth,
fill_values=('a', 'N/A'))
assert_equal_splitlines(out.getvalue(), """\
| Col1 | Col2 | Col3 | Col4 |
| 1.2 | "hello" | 1 | N/A |
| 2.4 | 's worlds | 2 | 2 |
""")
def test_write_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth,
delimiter_pad=None)
assert_equal_splitlines(out.getvalue(), """\
|Col1| Col2|Col3|Col4|
| 1.2| "hello"| 1| a|
| 2.4|'s worlds| 2| 2|
""")
def test_write_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False)
assert_equal_splitlines(out.getvalue(), """\
Col1 | Col2 | Col3 | Col4
1.2 | "hello" | 1 | a
2.4 | 's worlds | 2 | 2
""")
def test_write_no_delimiter():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False,
delimiter=None)
assert_equal_splitlines(out.getvalue(), """\
Col1 Col2 Col3 Col4
1.2 "hello" 1 a
2.4 's worlds 2 2
""")
def test_write_noheader_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader)
assert_equal_splitlines(out.getvalue(), """\
| 1.2 | "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
""")
def test_write_noheader_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader,
delimiter_pad=None)
assert_equal_splitlines(out.getvalue(), """\
|1.2| "hello"|1|a|
|2.4|'s worlds|2|2|
""")
def test_write_noheader_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader,
bookend=False)
assert_equal_splitlines(out.getvalue(), """\
1.2 | "hello" | 1 | a
2.4 | 's worlds | 2 | 2
""")
def test_write_noheader_no_delimiter():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, bookend=False,
delimiter=None)
assert_equal_splitlines(out.getvalue(), """\
1.2 "hello" 1 a
2.4 's worlds 2 2
""")
def test_write_formats():
"""Write a table as a fixed width table with no delimiter."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidth,
formats={'Col1': '%-8.3f', 'Col2': '%-15s'})
assert_equal_splitlines(out.getvalue(), """\
| Col1 | Col2 | Col3 | Col4 |
| 1.200 | "hello" | 1 | a |
| 2.400 | 's worlds | 2 | 2 |
""")
def test_read_twoline_normal():
"""Typical fixed format table with two header lines (with some cruft
thrown in to test column positioning)"""
table = """
Col1 Col2
---- ---------
1.2xx"hello"
2.4 's worlds
"""
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert_equal(dat.dtype.names, ('Col1', 'Col2'))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_twoline_ReST():
"""Read restructured text table"""
table = """
======= ===========
Col1 Col2
======= ===========
1.2 "hello"
2.4 's worlds
======= ===========
"""
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
header_start=1, position_line=2, data_end=-1)
assert_equal(dat.dtype.names, ('Col1', 'Col2'))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_twoline_human():
"""Read text table designed for humans and test having position line
before the header line"""
table = """
+------+----------+
| Col1 | Col2 |
+------|----------+
| 1.2 | "hello" |
| 2.4 | 's worlds|
+------+----------+
"""
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
delimiter='+',
header_start=1, position_line=0,
data_start=3, data_end=-1)
assert_equal(dat.dtype.names, ('Col1', 'Col2'))
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_twoline_fail():
"""Test failure if too many different character are on position line.
The position line shall consist of only one character in addition to
the delimiter.
"""
table = """
| Col1 | Col2 |
|------|==========|
| 1.2 | "hello" |
| 2.4 | 's worlds|
"""
with pytest.raises(InconsistentTableError) as excinfo:
ascii.read(table, Reader=ascii.FixedWidthTwoLine,
delimiter='|', guess=False)
assert 'Position line should only contain delimiters and one other character' in str(
excinfo.value)
def test_read_twoline_wrong_marker():
'''Test failure when the position line uses characters prone to ambiguity.
Characters in the position line must be part of an allowed set because
normal letters or numbers would lead to ambiguous tables.
'''
table = """
| Col1 | Col2 |
|aaaaaa|aaaaaaaaaa|
| 1.2 | "hello" |
| 2.4 | 's worlds|
"""
with pytest.raises(InconsistentTableError) as excinfo:
ascii.read(table, Reader=ascii.FixedWidthTwoLine,
delimiter='|', guess=False)
assert 'Characters in position line must be part' in str(excinfo.value)
def test_write_twoline_normal():
"""Write a table as a normal fixed width table."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine)
assert_equal_splitlines(out.getvalue(), """\
Col1 Col2 Col3 Col4
---- --------- ---- ----
1.2 "hello" 1 a
2.4 's worlds 2 2
""")
def test_write_twoline_no_pad():
"""Write a table as a fixed width table with no padding."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine,
delimiter_pad=' ', position_char='=')
assert_equal_splitlines(out.getvalue(), """\
Col1 Col2 Col3 Col4
==== ========= ==== ====
1.2 "hello" 1 a
2.4 's worlds 2 2
""")
def test_write_twoline_no_bookend():
"""Write a table as a fixed width table with no bookend."""
out = StringIO()
ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine,
bookend=True, delimiter='|')
assert_equal_splitlines(out.getvalue(), """\
|Col1| Col2|Col3|Col4|
|----|---------|----|----|
| 1.2| "hello"| 1| a|
| 2.4|'s worlds| 2| 2|
""")
def test_fixedwidthnoheader_splitting():
"""Test fix in #8511 where data_start is being ignored"""
tbl = """\
AAA y z
1 2 3
4 5 6
7 8 9
"""
names = ['a', 'b', 'c']
dat = ascii.read(tbl, data_start=1, data_end=3,
delimiter=' ', names=names,
format='fixed_width_no_header')
assert dat.colnames == names
assert np.all(dat['a'] == [1, 4])
assert np.all(dat['b'] == [2, 5])
assert np.all(dat['c'] == [3, 6])
|
33f349902c17602fcdd4719119c902ec7a5756a30f5308c86803b0298a544a63 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import pytest
from astropy.io.ascii.ui import read
from astropy.io.ascii.ipac import Ipac, IpacFormatError, IpacFormatErrorDBMS
from astropy.io import ascii
from astropy.table import Table, Column
from astropy.io.ascii.core import masked
DATA = '''
| a | b |
| char | char |
ABBBBBBABBBBBBBA
'''
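# The 'A' characters in the data row sit directly under the '|' delimiters of
# the header; the tests below check how the 'definition' option ('ignore',
# 'left', 'right') assigns those delimiter-column characters.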
def test_ipac_default():
# default should be ignore
table = read(DATA, Reader=Ipac)
assert table['a'][0] == 'BBBBBB'
assert table['b'][0] == 'BBBBBBB'
def test_ipac_ignore():
table = read(DATA, Reader=Ipac, definition='ignore')
assert table['a'][0] == 'BBBBBB'
assert table['b'][0] == 'BBBBBBB'
def test_ipac_left():
table = read(DATA, Reader=Ipac, definition='left')
assert table['a'][0] == 'BBBBBBA'
assert table['b'][0] == 'BBBBBBBA'
def test_ipac_right():
table = read(DATA, Reader=Ipac, definition='right')
assert table['a'][0] == 'ABBBBBB'
assert table['b'][0] == 'ABBBBBBB'
def test_too_long_colname_default():
table = Table([[3]], names=['a1234567890123456789012345678901234567890'])
out = StringIO()
with pytest.raises(IpacFormatError):
ascii.write(table, out, Writer=Ipac)
def test_too_long_colname_strict():
table = Table([[3]], names=['a1234567890123456'])
out = StringIO()
with pytest.raises(IpacFormatErrorDBMS):
ascii.write(table, out, Writer=Ipac, DBMS=True)
def test_too_long_colname_notstrict():
table = Table([[3]], names=['a1234567890123456789012345678901234567890'])
out = StringIO()
with pytest.raises(IpacFormatError):
ascii.write(table, out, Writer=Ipac, DBMS=False)
@pytest.mark.parametrize(("strict_", "Err"),
[(True, IpacFormatErrorDBMS),
(False, IpacFormatError)])
def test_non_alfnum_colname(strict_, Err):
table = Table([[3]], names=['a123456789 01234'])
out = StringIO()
with pytest.raises(Err):
ascii.write(table, out, Writer=Ipac, DBMS=strict_)
def test_colname_starswithnumber_strict():
table = Table([[3]], names=['a123456789 01234'])
out = StringIO()
with pytest.raises(IpacFormatErrorDBMS):
ascii.write(table, out, Writer=Ipac, DBMS=True)
def test_double_colname_strict():
table = Table([[3], [1]], names=['DEC', 'dec'])
out = StringIO()
with pytest.raises(IpacFormatErrorDBMS):
ascii.write(table, out, Writer=Ipac, DBMS=True)
@pytest.mark.parametrize('colname', ['x', 'y', 'z', 'X', 'Y', 'Z'])
def test_reserved_colname_strict(colname):
table = Table([['reg']], names=[colname])
out = StringIO()
with pytest.raises(IpacFormatErrorDBMS):
ascii.write(table, out, Writer=Ipac, DBMS=True)
def test_too_long_comment():
with pytest.warns(UserWarning, match=r'Comment string > 78 characters was '
r'automatically wrapped\.'):
table = Table([[3]])
table.meta['comments'] = ['a' * 79]
out = StringIO()
ascii.write(table, out, Writer=Ipac)
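# An IPAC header has four lines (column names, data types, units, null values);
# the over-long comment above is wrapped onto two leading '\' comment lines.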
expected_out = """\
\\ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
\\ a
|col0|
|long|
| |
|null|
3
"""
assert out.getvalue().strip().splitlines() == expected_out.splitlines()
def test_out_with_nonstring_null():
'''Test a (non-string) fill value.
Even for a table with no masked values, the fill_value should show up in the
table header.
'''
table = Table([[3]], masked=True)
out = StringIO()
ascii.write(table, out, Writer=Ipac, fill_values=[(masked, -99999)])
expected_out = """\
| col0|
| long|
| |
|-99999|
3
"""
assert out.getvalue().strip().splitlines() == expected_out.splitlines()
def test_include_exclude_names():
table = Table([[1], [2], [3]], names=('A', 'B', 'C'))
out = StringIO()
ascii.write(table, out, Writer=Ipac, include_names=('A', 'B'), exclude_names=('A',))
# column B should be the only included column in output
expected_out = """\
| B|
|long|
| |
|null|
2
"""
assert out.getvalue().strip().splitlines() == expected_out.splitlines()
def test_short_dtypes():
table = Table([Column([1.0], dtype='f4'), Column([2], dtype='i2')],
names=('float_col', 'int_col'))
out = StringIO()
ascii.write(table, out, Writer=Ipac)
expected_out = """\
|float_col|int_col|
| float| int|
| | |
| null| null|
1.0 2
"""
assert out.getvalue().strip().splitlines() == expected_out.splitlines()
|
d9a466545f178a9764cfed1f4481ee859e16b2fcc06adaa982a5ca26a82c9ee2 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``HTML``
reader/writer and aims to document its functionality.
Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
to be installed.
"""
from io import StringIO
from astropy.io.ascii import html
from astropy.io.ascii import core
from astropy.table import Table
import pytest
import numpy as np
from .common import setup_function, teardown_function # noqa
from astropy.io import ascii
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound
@pytest.mark.skipif('not HAS_BS4')
def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>',
'html.parser')
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
assert soup_str.soup is soup
def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in 'abcde':
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
# Should return False on non-<table> tags and None
soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>'
'<td>B</td></tr></table>', 'html.parser').table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {'table_id': 2}, 1) is False
assert html.identify_table(soup, {'table_id': 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False
assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True
@pytest.mark.skipif('not HAS_BS4')
def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td></td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
# Now with a specific value '...' => missing
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td>...</td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
@pytest.mark.skipif('not HAS_BS4')
def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = ['<table>',
'<tr><th>A</th> <th>B</th></tr>',
'<tr><td>1</td><td>2</td></tr>',
'</table>']
# Swap column names
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])
assert dat.colnames == ['B', 'A']
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])
assert dat.colnames == ['A']
assert len(dat) == 1
assert np.all(dat['A'] == 2)
@pytest.mark.skipif('not HAS_BS4')
def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ['<table>',
'<tr><td>1</td></tr>',
'<tr><td>2</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.colnames == ['col1']
assert len(dat) == 2
dat = Table.read(table_in, format='ascii.html', names=['a'])
assert dat.colnames == ['a']
assert len(dat) == 2
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>',
'<tr><td>B</td></tr></table>']
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},
guess=False)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},
guess=False)
assert err.match("ERROR: HTML table number 3 not found$")
@pytest.mark.skipif('not HAS_BS4')
def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):
try:
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': parser}, guess=False)
except FeatureNotFound:
if parser == 'html.parser':
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': 'foo'}, guess=False)
@pytest.mark.skipif('HAS_BS4')
def test_htmlinputter_no_bs4():
"""
This should raise an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([])
@pytest.mark.skipif('not HAS_BS4')
def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
# In the absence of table_id, defaults to the first table
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>',
'<tr><td>1</td><td>a</td><td>1.05</td></tr>',
'<tr><td>2</td><td>b</td><td>2.75</td></tr>',
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>',
'<tr><td>4</td><td>d</td><td>10.5</td></tr>',
'<tr><td>5</td><td>e</td><td>27.5</td></tr>',
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>',
'<tr><td>7</td><td>g</td><td>105.0</td></tr>',
'<tr><td>8</td><td>h</td><td>275.0</td></tr>',
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
@pytest.mark.skipif('not HAS_BS4')
def test_htmlsplitter():
"""
Test to make sure that HTMLSplitter correctly processes lines
of type SoupString and returns a generator that gives all
header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>',
'html.parser').tr)]
expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>')
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([]))
@pytest.mark.skipif('not HAS_BS4')
def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
returns the first line of header data. Uses data/html.html
for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>C1</th><th>C2</th><th>C3</th></tr>'
# start_line should return None if no valid header is found
lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><th>Header</th></tr>')
with pytest.raises(TypeError):
header.start_line(lines)
@pytest.mark.skipif('not HAS_BS4')
def test_htmldata():
"""
Test to ensure that the start_line and end_line methods
of HTMLData return the boundaries of the table data. Uses
data/html.html for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>1</td><td>a</td><td>1.05</td></tr>'
# end_line returns the index of the last data element + 1
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>4</td><td>d</td><td>10.5</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>7</td><td>g</td><td>105.0</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>'
# start_line should raise an error if no table data exists
lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><td>Data</td></tr>')
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines)
def test_multicolumn_write():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
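# col2 holds 2 values per row and col3 holds 3, so the header row should use
# colspan="2" and colspan="3" respectively.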
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML().write(table)[0].strip()
assert out == expected.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_multicolumn_write_escape():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>, and that raw HTML in a 'raw_html_cols' column
is written without escaping.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()
assert out == expected.strip()
def test_write_no_multicols():
"""
Test to make sure that the HTML writer will not use
multi-dimensional columns if the multicol parameter
is False.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
assert html.HTML({'multicol': False}).write(table)[0].strip() == \
expected.strip()
@pytest.mark.skipif('not HAS_BS4')
def test_multicolumn_read():
"""
Test to make sure that the HTML reader reads multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
Ensure that any string element within a multidimensional column
casts all elements to string prior to type conversion operations.
"""
table = Table.read('data/html2.html', format='ascii.html')
str_type = np.dtype((str, 21))
expected = Table(np.array([(['1', '2.5000000000000000001'], 3),
(['1a', '1'], 3.5)],
dtype=[('A', str_type, (2,)), ('B', '<f8')]))
assert np.all(table == expected)
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
"""
Test that columns can contain raw HTML which is not escaped.
"""
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
# One column contains raw HTML (string input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# One column contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert expected in out.getvalue()
# Two columns contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write_clean():
"""
Test that raw HTML columns are cleaned with bleach so that disallowed
tags are escaped while allowed tags pass through.
"""
import bleach # noqa
t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c'])
# Confirm that <script> and <p> get escaped but not <em>
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames})
expected = """\
<tr>
<td>&lt;script&gt;x&lt;/script&gt;</td>
<td>&lt;p&gt;y&lt;/p&gt;</td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# Confirm that we can whitelist <p>
out = StringIO()
t.write(out, format='ascii.html',
htmldict={'raw_html_cols': t.colnames,
'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}})
expected = """\
<tr>
<td>&lt;script&gt;x&lt;/script&gt;</td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
def test_write_table_html_fill_values():
"""
Test that passing fill_values should replace any matching row
"""
buffer_output = StringIO()
t = Table([[1], [2]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world'),
format='html')
t_expected = Table([['Hello world'], [2]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
"""
Test that passing optional column in fill_values should only replace
matching columns
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'),
format='html')
t_expected = Table([[1], ['Hello world']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8'))
t['a'] = np.ma.masked
ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'),
format='html')
t_expected = Table([['TEST'], [1]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multicolumn_table_html_fill_values():
"""
Test to make sure that the HTML writer writes multidimensional
columns with correctly replaced fill_values.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_output = StringIO()
t = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t, buffer_output, fill_values=('a', 'z'),
format='html')
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_expected = StringIO()
t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values for multidimensional tables
"""
buffer_output = StringIO()
t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True)
t['a'][0:2] = np.ma.masked
t['b'][0:2] = np.ma.masked
ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')],
format='html')
t_expected = Table([['MASKED', 'MASKED', 3, 4], [
'MASKED', 'MASKED', '--', 'b']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
print(buffer_expected.getvalue())
assert buffer_output.getvalue() == buffer_expected.getvalue()
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_unicode():
"""
Test reading an HTML table with unicode values
"""
table_in = ['<table>',
'<tr><td>Δ</td></tr>',
'<tr><td>Δ</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert np.all(dat['col1'] == ['Δ', 'Δ'])
|
5820a156ebb6ddf68b0eed96e5d1e631717bf7281f5f55cdf2ed31161ac0f20b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
from astropy.utils.decorators import deprecated
__all__ = ['assert_equal', 'assert_almost_equal',
'assert_true', 'setup_function', 'teardown_function',
'has_isnan']
CWD = os.getcwd()
TEST_DIR = os.path.dirname(__file__)
has_isnan = True
try:
from math import isnan # noqa
except ImportError:
try:
from numpy import isnan # noqa
except ImportError:
has_isnan = False
print('Tests requiring isnan will fail')
def setup_function(function):
os.chdir(TEST_DIR)
def teardown_function(function):
os.chdir(CWD)
# Compatibility functions to convert from nose to pytest
def assert_equal(a, b):
assert a == b
def assert_almost_equal(a, b, **kwargs):
assert np.allclose(a, b, **kwargs)
def assert_true(a):
assert a
def make_decorator(func):
"""
Wraps a test decorator so as to properly replicate metadata
of the decorated function, including nose's additional stuff
(namely, setup and teardown).
"""
def decorate(newfunc):
if hasattr(func, 'compat_func_name'):
name = func.compat_func_name
else:
name = func.__name__
newfunc.__dict__ = func.__dict__
newfunc.__doc__ = func.__doc__
newfunc.__module__ = func.__module__
if not hasattr(newfunc, 'compat_co_firstlineno'):
try:
newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
except AttributeError:
newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
try:
newfunc.__name__ = name
except TypeError:
# can't set func name in 2.3
newfunc.compat_func_name = name
return newfunc
return decorate
@deprecated('5.1', alternative='pytest.raises')
def raises(*exceptions):
"""Test must raise one of expected exceptions to pass.
Example use::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
"""
valid = ' or '.join([e.__name__ for e in exceptions])
def decorate(func):
name = func.__name__
def newfunc(*arg, **kw):
try:
func(*arg, **kw)
except exceptions:
pass
else:
message = f"{name}() did not raise {valid}"
raise AssertionError(message)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
|
5ccbe7eaa733cac7e8f0207a207376d9dd1eecfddeb5d40f33c9da706b4fc5ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io.ascii import read
from astropy.utils.data import get_pkg_data_filename
# NOTE: Python can be built without bz2 or lzma
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA # noqa
@pytest.mark.parametrize('filename', ['data/daophot.dat.gz', 'data/latex1.tex.gz',
'data/short.rdb.gz'])
def test_gzip(filename):
t_comp = read(get_pkg_data_filename(filename))
t_uncomp = read(get_pkg_data_filename(filename.replace('.gz', '')))
assert t_comp.dtype.names == t_uncomp.dtype.names
assert np.all(t_comp.as_array() == t_uncomp.as_array())
@pytest.mark.xfail('not HAS_BZ2')
@pytest.mark.parametrize('filename', ['data/short.rdb.bz2', 'data/ipac.dat.bz2'])
def test_bzip2(filename):
t_comp = read(get_pkg_data_filename(filename))
t_uncomp = read(get_pkg_data_filename(filename.replace('.bz2', '')))
assert t_comp.dtype.names == t_uncomp.dtype.names
assert np.all(t_comp.as_array() == t_uncomp.as_array())
@pytest.mark.xfail('not HAS_LZMA')
@pytest.mark.parametrize('filename', ['data/short.rdb.xz', 'data/ipac.dat.xz'])
def test_xz(filename):
t_comp = read(get_pkg_data_filename(filename))
t_uncomp = read(get_pkg_data_filename(filename.replace('.xz', '')))
assert t_comp.dtype.names == t_uncomp.dtype.names
assert np.all(t_comp.as_array() == t_uncomp.as_array())
|
5faade11f31845505507d86d9ccec609c28601f97c54ca1e9d50efa2af2f45dc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
from astropy.table.column import MaskedColumn
import os
import copy
import sys
from io import StringIO
from contextlib import nullcontext
import pytest
import numpy as np
import yaml
from astropy.table import Table, Column, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.compat import NUMPY_LT_1_19
from astropy.io.ascii.ecsv import DELIMITERS
from astropy.io import ascii
from astropy import units as u
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs
from .common import TEST_DIR
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
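# float128 is not available on all platforms (e.g. Windows or 32-bit builds),
# so drop it from the tested dtypes there.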
if not hasattr(np, 'float128') or os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
"""
Write a full-featured table with common types and explicitly check the output
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
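# DELIMITERS contains the two delimiters allowed by ECSV: space and comma.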
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t['a'] = np.arange(24).reshape(2, 3, 4)
t['a'].info.description = 'description'
t['a'].info.meta = {1: 2}
t['b'] = [1, 2]
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t2['a'] == t['a'])
assert t2['a'].shape == t['a'].shape
assert t2['a'].dtype == t['a'].dtype
assert t2['a'].info.description == t['a'].info.description
assert t2['a'].info.meta == t['a'].info.meta
assert np.all(t2['b'] == t['b'])
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
# For a column that is a native astropy Column, ignore the specified
# `attrs`. This happens for a mixin like Quantity that is stored in a
# `Table` (not QTable).
if isinstance(obj1, Column):
attrs = []
assert obj1.shape == obj2.shape
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
# For no attrs that means we just compare directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == 'f':
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
serialized_names = ['ang',
'cr.x', 'cr.y', 'cr.z',
'dt',
'el.x', 'el.y', 'el.z',
'lat',
'lon',
'nd',
'obj',
'qdb',
'qdex',
'qmag',
'sc.ra', 'sc.dec',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime',
'scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime',
'scpm.ra', 'scpm.dec', 'scpm.distance',
'scpm.pm_ra_cosdec', 'scpm.pm_dec',
'scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',
'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',
'scpmrv.radial_velocity',
'scrv.ra', 'scrv.dec', 'scrv.distance',
'scrv.radial_velocity',
'sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance',
'sr.lon', 'sr.lat', 'sr.distance',
'srd.lon', 'srd.lat', 'srd.distance',
'srd.differentials.s.d_lon_coslat',
'srd.differentials.s.d_lat',
'srd.differentials.s.d_distance',
'tm', # serialize_method is formatted_value
'tm2', # serialize_method is formatted_value
'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2
'tm3.location.x', 'tm3.location.y', 'tm3.location.z',
'x']
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
# Read as an ascii.basic table (skip all the ECSV header processing)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3 ** ndim))]
col = col[idxs].reshape([3] * ndim)
return col
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmpdir):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
# From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmpdir):
"""Same as prev but set the serialize_method to 'data_mask' so mask is written out"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'][0] = '' # This would come back as masked for default "" NULL marker
# MaskedColumn with no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for info about how we test a column with no masked elements.
t['d'] = [1, 2, 3]
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmpdir):
"""Ensure that we can read-back enabled user-defined units."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = str(tmpdir.join('test.ecsv'))
unit = u.def_unit('bandpass_sol_lum')
t = table_cls()
t['l'] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
if table_cls is QTable:
assert np.all(t2['l'].value == t['l'].value)
else:
assert np.all(t2['l'] == t['l'])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3['l'].unit is unit
assert np.all(t3['l'] == t['l'])
# Just to be sure, also try writing with unit enabled.
filename2 = str(tmpdir.join('test2.ecsv'))
t3.write(filename2)
t4 = table_cls.read(filename2)
assert t4['l'].unit is unit
assert np.all(t4['l'] == t['l'])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format='ecsv')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize('serialize_method', ['null_value', 'data_mask'])
@pytest.mark.parametrize('dtype', [np.int64, np.float64, bool, str])
@pytest.mark.parametrize('delimiter', [',', ' '])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
# np does something funny and gives a dtype of U21.
col = col.astype('U2')
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t['a'] = col
t['b'] = ['x', 'y'] # Add another column for kicks
out = StringIO()
t.write(out, format='ascii.ecsv', serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], 'mask'):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('subtype', ['some-user-type', 'complex'])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(AstropyUserWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'"):
t = ascii.read(txt, format='ecsv')
assert t['a'].dtype.kind == 'U'
assert t['a'][0] == '[1,2]'
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(ValueError, match="column 'a' failed to convert: shape mismatch"):
Table.read(txt, format='ascii.ecsv')
def test_write_not_json_serializable():
t = Table()
t['a'] = np.array([set([1, 2]), 1], dtype=object)
match = "could not convert column 'a' to string: Object of type set is not JSON serializable"
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format='ascii.ecsv')
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_complex():
"""Test an ECSV file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
match = "datatype 'complex' of column 'a' is not in allowed values"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
@pytest.mark.skipif(NUMPY_LT_1_19,
reason="numpy cannot parse 'complex' as string until 1.19+")
def test_read_complex_v09():
"""Test an ECSV file with a complex column for version 0.9
Note: ECSV Version <=0.9 files should not raise ValueError
for complex datatype to maintain backwards compatibility.
"""
txt = """\
# %ECSV 0.9
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
t = Table.read(txt, format='ascii.ecsv')
assert t['a'].dtype.type is np.complex128
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
match = r"column 'a' is not in allowed values \('bool', 'int8', 'int16', 'int32'"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_bad_datatype_v09():
"""Test a malformed ECSV file for version 0.9
Note: ECSV Version <=0.9 files should not raise ValueError
for malformed datatypes to maintain backwards compatibility.
"""
txt = """\
# %ECSV 0.9
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
t = Table.read(txt, format='ascii.ecsv')
assert t['a'][0] == "fail"
assert type(t['a'][1]) is str
assert type(t['a'].dtype) == np.dtype("O")
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t['a'] = np.array([np.pi, 1/7], dtype=np.float64)
t['a'].info.format = '.2f'
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t['a'] == t2['a'])
assert t2['a'].info.format == '.2f'
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First, here is some helper code used to generate the expected outputs below.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith('#')]
lines = lines[2:] # Get rid of the header
out = yaml.safe_load('\n'.join(lines))
return out
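# A sketch of the helper's behavior (illustrative only): given ECSV text whose
# commented header contains
#     # %ECSV 1.0
#     # ---
#     # datatype:
#     # - {name: a, datatype: int64}
#     # schema: astropy-2.0
# it strips the leading '# ', drops the '%ECSV' and '---' lines, and YAML-loads
# the rest, returning
#     {'datatype': [{'name': 'a', 'datatype': 'int64'}], 'schema': 'astropy-2.0'}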
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr['datatype'])
print(f'exps[{name!r}] =', fmt_hdr[:1])
print(fmt_hdr[1:])
print()
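# Usage note (hypothetical invocation, not called during the tests): running
# _make_expected_values(cols) after the `cols` dict below is filled in prints
# ready-to-paste `exps[...] = [...]` blocks, which can be used to regenerate the
# expected `datatype` entries that follow whenever the ECSV writer changes.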
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols['scalar'] = np.array([1, 2], dtype=np.int16)
exps['scalar'] = [
{'datatype': 'int16', 'name': 'scalar'}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols['2-d variable array lists'] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps['2-d variable array lists'] = [
{'datatype': 'string',
'name': '2-d variable array lists',
'subtype': 'json'}]
# Array of numpy arrays that is a 2-d variable array
cols['2-d variable array numpy'] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps['2-d variable array numpy'] = [
{'datatype': 'string',
'name': '2-d variable array numpy',
'subtype': 'float32[2,null]'}]
cols['1-d variable array lists'] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps['1-d variable array lists'] = [
{'datatype': 'string',
'name': '1-d variable array lists',
'subtype': 'json'}]
# Variable-length array
cols['1-d variable array numpy'] = np.array(
[np.array([1, 2], dtype=np.uint8),
np.array([3, 4, 5], dtype=np.uint8)], dtype=object)
exps['1-d variable array numpy'] = [
{'datatype': 'string',
'name': '1-d variable array numpy',
'subtype': 'uint8[null]'}]
cols['1-d variable array numpy str'] = np.array(
[np.array(['a', 'b']),
np.array(['c', 'd', 'e'])], dtype=object)
exps['1-d variable array numpy str'] = [
{'datatype': 'string',
'name': '1-d variable array numpy str',
'subtype': 'string[null]'}]
cols['1-d variable array numpy bool'] = np.array(
[np.array([True, False]),
np.array([True, False, True])], dtype=object)
exps['1-d variable array numpy bool'] = [
{'datatype': 'string',
'name': '1-d variable array numpy bool',
'subtype': 'bool[null]'}]
cols['1-d regular array'] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps['1-d regular array'] = [
{'datatype': 'string',
'name': '1-d regular array',
'subtype': 'int8[2]'}]
cols['2-d regular array'] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps['2-d regular array'] = [
{'datatype': 'string',
'name': '2-d regular array',
'subtype': 'float16[2,2]'}]
cols['scalar object'] = np.array([{'a': 1}, {'b':2}], dtype=object)
exps['scalar object'] = [
{'datatype': 'string', 'name': 'scalar object', 'subtype': 'json'}]
cols['1-d object'] = np.array(
[[{'a': 1}, {'b':2}],
[{'a': 1}, {'b':2}]], dtype=object)
exps['1-d object'] = [
{'datatype': 'string',
'name': '1-d object',
'subtype': 'json[2]'}]
@pytest.mark.parametrize('name,col,exp',
list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns.
"""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr['datatype'] == exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, 'data', 'subtypes.ecsv'))
colnames = ('i_index,'
's_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,'
'f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,'
'v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,'
'm_int,m_double').split(',')
assert t.colnames == colnames
type_map = {'byte': 'int8',
'short': 'int16',
'int': 'int32',
'long': 'int64',
'float': 'float32',
'double': 'float64',
'string': 'str',
'boolean': 'bool'}
for col in t.itercols():
info = col.info
if info.name == 'i_index':
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == 's': # Scalar
assert col.shape == (16,)
if subtype == 'f': # Fixed array
assert col.shape == (16, 3)
if subtype == 'v': # Variable array
assert col.shape == (16,)
assert info.dtype.name == 'object'
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format='ascii.ecsv')
assert np.all(t['o'] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t['o'].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t['f'] == exp)
assert np.all(t['f'].mask == exp.mask)
assert np.all(t['v'][0] == [1])
assert np.all(t['v'][2] == [2, 3])
assert np.all(t['v'].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t['f'] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t['v'] = np.empty(2, dtype=object)
t['v'][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t['v'][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format='ascii.ecsv')
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name])
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t['col'].dtype.kind == 'U' # would be int with basic format
assert t['col'].description == 'hello'
|
8b8a3ddcc41cca04ec79ee13828b925c9b29201728e20905d829daec3a9bbb96 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io.ascii.core import convert_numpy
import re
from io import BytesIO, open
from collections import OrderedDict
import locale
import platform
from io import StringIO
import pathlib
import pytest
import numpy as np
from astropy.io import ascii
from astropy.table import Table, MaskedColumn
from astropy import table
from astropy.units import Unit
from astropy.table.table_helpers import simple_table
from .common import (assert_equal, assert_almost_equal,
assert_true)
from astropy.io.ascii import core
from astropy.io.ascii.ui import _probably_html, get_read_trace
from astropy.utils.exceptions import AstropyWarning
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2 # noqa
# setup/teardown function to have the tests run in the correct directory
from .common import setup_function, teardown_function # noqa
def asciiIO(x):
return BytesIO(x.encode('ascii'))
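# Illustrative usage (an assumption, not part of the original tests): asciiIO wraps
# a plain string in a BytesIO so the readers can be exercised with a bytes
# file-like input, e.g. ascii.read(asciiIO('a b\n1 2')).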
@pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False},
{'use_fast_converter': True}, 'force'])
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = 'U'
with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"):
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind == expected_kind
def test_read_specify_converters_with_names():
"""
Exact example from #9701: When using ascii.read with both the names and
converters arguments, the converters dictionary ignores the user-supplied
names and requires that you know the guessed names.
"""
csv_text = ['a,b,c', '1,2,3', '4,5,6']
names = ['A', 'B', 'C']
converters = {
'A': [ascii.convert_numpy(float)],
'B': [ascii.convert_numpy(int)],
'C': [ascii.convert_numpy(str)]
}
t = ascii.read(csv_text, format='csv', names=names, converters=converters)
assert t['A'].dtype.kind == 'f'
assert t['B'].dtype.kind == 'i'
assert t['C'].dtype.kind == 'U'
def test_read_remove_and_rename_columns():
csv_text = ['a,b,c', '1,2,3', '4,5,6']
reader = ascii.get_reader(Reader=ascii.Csv)
reader.read(csv_text)
header = reader.header
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.remove_columns(['NOT-EXIST'])
header.remove_columns(['c'])
assert header.colnames == ('a', 'b')
header.rename_column('a', 'aa')
assert header.colnames == ('aa', 'b')
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.rename_column('NOT-EXIST', 'aa')
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))
assert len(dat) == 2
assert dat.colnames == ['a', 'b']
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c d', 'e f'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_format_arg():
"""
When the format or Reader is explicitly given, disable the
strict column name checking in guessing.
"""
dat = ascii.read(['1,2', '3,4'], format='basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# For good measure check the same in the unified I/O interface
dat = Table.read(['1,2', '3,4'], format='ascii.basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_delimiter_arg():
"""
When the delimiter is explicitly given, do not try other delimiters in guessing.
"""
fields = ['10.1E+19', '3.14', '2048', '-23']
values = [1.01e20, 3.14, 2048, -23]
# Default guess should recognise CSV with optional spaces
t0 = ascii.read(asciiIO(', '.join(fields)), guess=True)
for n, v in zip(t0.colnames, values):
assert t0[n][0] == v
# Forcing space as delimiter produces type str columns ('10.1E+19,')
t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ')
for n, v in zip(t1.colnames[:-1], fields[:-1]):
assert t1[n][0] == v + ','
def test_reading_mixed_delimiter_tabs_spaces():
# Regression test for https://github.com/astropy/astropy/issues/6770
dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc'))
assert len(dat) == 2
Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header',
names=['a', 'b', 'c'])
assert len(dat) == 2
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
# CParser only uses columns in `names` and thus reports mismatch in num_col
with pytest.raises(ascii.InconsistentTableError):
ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if ('Reader' in test_opts and f"fast_{test_opts['Reader']._format_name}"
in core.FAST_CLASSES): # has fast version
if 'Inputter' not in test_opts: # fast reader doesn't allow this
test_opts['fast_reader'] = fast_reader
table = ascii.read(testfile['name'], **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files_via_table(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts:
format = f"ascii.{test_opts['Reader']._format_name}"
del test_opts['Reader']
else:
format = 'ascii'
if f'fast_{format}' in core.FAST_CLASSES:
test_opts['fast_reader'] = fast_reader
table = Table.read(testfile['name'], format=format, **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if not testfile['opts'].get('guess', True):
continue
print(f"\n\n******** READING {testfile['name']}")
for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):
# Copy read options except for those in filter_read_opts
guess_opts = dict((k, v) for k, v in testfile['opts'].items()
if k not in filter_read_opts)
table = ascii.read(testfile['name'], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_validate_read_kwargs():
lines = ['a b', '1 2', '3 4']
# Check that numpy integers are allowed
out = ascii.read(lines, data_start=np.int16(2))
assert np.all(out['a'] == [3])
with pytest.raises(TypeError, match=r"read\(\) argument 'data_end' must be a "
r"<class 'int'> object, "
r"got <class 'str'> instead"):
ascii.read(lines, data_end='needs integer')
with pytest.raises(TypeError, match=r"read\(\) argument 'fill_include_names' must "
r"be a list-like object, got <class 'str'> instead"):
ascii.read(lines, fill_include_names='ID')
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
for col in table.itercols():
# Four columns have all INDEF values and are masked, rest are normal Column
if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'):
assert np.all(col.mask)
else:
assert not hasattr(col, 'mask')
def test_daophot_types():
"""
Test specific data types which are different from what would be
inferred automatically based only on data values. The DAOphot reader uses
the header information to assign types.
"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
assert table['LID'].dtype.char in 'fd' # float or double
assert table['MAG'].dtype.char in 'fd' # even without any data values
assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)
assert table['ID'].dtype.char in 'il' # int or long
def test_daophot_header_keywords():
table = ascii.read('data/daophot.dat', Reader=ascii.Daophot)
expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
('REJFILE', '"hello world"', 'filename', '%-23s'),
('SCALE', '1.', 'units/pix', '%-23.7g'),)
keywords = table.meta['keywords'] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword['value'], value)
assert_equal(keyword['units'], units)
assert_equal(keyword['format'], format_)
def test_daophot_multiple_aperture():
table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot)
assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names
assert table['MAG5'][4] == 22.13 # A sample entry in the daophot3.dat file
assert table['MERR2'][0] == 1.171
assert np.all(table['RAPERT5'] == 23.3) # assert that all the 5th apertures are the same (23.3)
def test_daophot_multiple_aperture2():
table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot)
assert 'MAG15' in table.colnames # MAG15 is one of the newly created column names
assert table['MAG15'][1] == -7.573 # A sample entry in the daophot4.dat file
assert table['MERR2'][0] == 0.049
assert np.all(table['RAPERT5'] == 5.) # assert that all the 5th apertures are the same (5.0)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader,
guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/bad.txt', fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader)
def test_missing_file():
with pytest.raises(OSError):
ascii.read('does_not_exist')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
data = ascii.read('data/simple3.txt', names=names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
include_names = ('c1', 'c3')
data = ascii.read('data/simple3.txt', names=names, include_names=include_names,
delimiter='|', fast_reader=fast_reader)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
exclude_names = ('Y', 'object')
data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))
def test_include_names_daophot():
include_names = ('ID', 'MAG', 'PIER')
data = ascii.read('data/daophot.dat', include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
data = ascii.read('data/daophot.dat', exclude_names=exclude_names)
assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter='|')
reader.inputter.process_lines = process_lines
data = reader.read('data/bars_at_ends.txt')
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
assert_equal(len(data), 3)
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r'^\|\s*', '', line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter='|')
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read('data/nls1_stackinfo.dbout')
cols = get_testfiles('data/nls1_stackinfo.dbout')['cols']
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = 'data/test4.dat'
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
assert_equal(data.field('statname')[2], 'chi2modvar')
assert_almost_equal(data.field('statval')[2], 497.56468441)
def test_start_end():
data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field('statname')[0], 'chi2xspecvar')
assert_equal(data.field('statname')[-1], 'chi2gehrels')
def test_set_converters():
converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
ascii.convert_numpy('float32')],
'p1.gamma': [ascii.convert_numpy('str')]
}
data = ascii.read('data/test4.dat', converters=converters)
assert_equal(str(data['zabs1.nh'].dtype), 'float32')
assert_equal(data['p1.gamma'][0], '1.26764500000')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
f = 'data/simple.txt'
testfile = get_testfiles(f)[0]
with open(f, 'rb') as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read('data/apostrophe.rdb')
assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
assert_equal(data.meta['comments'], ['first comment', 'second comment'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
**testfile['opts'])
assert_true((data['a'].mask == [False, True]).all())
assert_true((data['a'] == [1, 1]).all())
assert_true((data['b'].mask == [False, True]).all())
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
**testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_include_names=['b'], **testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_exclude_names=['a'], **testfile['opts'])
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation """
assert not hasattr(data['a'], 'mask')
assert_true((data['a'] == ['1', 'a']).all())
assert_true((data['b'].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data['b'] == [2, -999]).all())
data['b'].mask = False # explicitly unmask for comparison
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
fast_reader=fast_reader, **testfile['opts'])
data['a'].mask = False # explicitly unmask for comparison
assert_true((data['a'] == [42, 42]).all())
def test_masking_Cds_Mrt():
f = 'data/cds.dat' # Tested for CDS and MRT
for testfile in get_testfiles(f):
data = ascii.read(f,
**testfile['opts'])
assert_true(data['AK'].mask[0])
assert not hasattr(data['Fit'], 'mask')
def test_null_Ipac():
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
mask = np.array([(True, False, True, False, True),
(False, False, False, False, False)],
dtype=[('ra', '|b1'),
('dec', '|b1'),
('sai', '|b1'),
('v2', '|b1'),
('sptype', '|b1')])
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict((('intval', 1),
('floatval', 2.3e3),
('date', "Wed Sp 20 09:48:36 1995"),
('key_continue', 'IPAC keywords can continue across lines')))
comments = ['This is an example of a valid comment']
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
assert data.meta['keywords'].keys() == keywords.keys()
for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
assert data_kv['value'] == kv
assert data.meta['comments'] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read('data/space_delim_no_header.dat',
delimiter=',', guess=True)
assert data.dtype.names == ('1 3.4 hello',)
assert len(data) == 1
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconsistent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,,',
'2, , 4.0 , ss '])
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [' a ',
'---',
' --']
assert dat['a'].dtype.kind == 'i'
# Same test with a fixed width reader
table = '\n'.join([' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss'])
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.rdb',
'nrows': 2,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.tab',
'nrows': 2,
'opts': {'Reader': ascii.Tab}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Mrt}},
# Test malformed CDS file (issues #2241 #467)
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds_malformed.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header2.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
'name': 'data/continuation.dat',
'nrows': 2,
'opts': {'Inputter': ascii.ContinuationLinesInputter,
'Reader': ascii.NoHeader}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/daophot.dat',
'nrows': 2,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALU-ES',
'VALU-ES_1',
'FLAG'),
'name': 'data/sextractor.dat',
'nrows': 3,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('col0',
'objID',
'osrcid',
'xsrcid',
'SpecObjID',
'ra',
'dec',
'obsid',
'ccdid',
'z',
'modelMag_i',
'modelMagErr_i',
'modelMag_r',
'modelMagErr_r',
'expo',
'theta',
'rad_ecf_39',
'detlim90',
'fBlim90'),
'name': 'data/nls1_stackinfo.dbout',
'nrows': 58,
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Mrt}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/no_data_daophot.dat',
'nrows': 0,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALUES',
'VALUES_1',
'FLAG'),
'name': 'data/no_data_sextractor.dat',
'nrows': 0,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/no_data_ipac.dat',
'nrows': 0,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('ra', 'v2'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
{'cols': ('a', 'b', 'c'),
'name': 'data/no_data_with_header.dat',
'nrows': 0,
'opts': {}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.rdb',
'nrows': 7,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.tab',
'nrows': 7,
'opts': {'Reader': ascii.Tab}},
{'cols': ('test 1a', 'test2', 'test3', 'test4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'"}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple2.txt',
'nrows': 3,
'opts': {'delimiter': '|'}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple3.txt',
'nrows': 2,
'opts': {'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
'name': 'data/simple4.txt',
'nrows': 3,
'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
{'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
'name': 'data/space_delim_blank_lines.txt',
'nrows': 3,
'opts': {}},
{'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
'name': 'data/test4.dat',
'nrows': 9,
'opts': {}},
{'cols': ('a', 'b', 'c'),
'name': 'data/fill_values.txt',
'nrows': 2,
'opts': {'delimiter': ','}},
{'name': 'data/whitespace.dat',
'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
'nrows': 2,
'opts': {'delimiter': r'\s'}},
{'name': 'data/simple_csv.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'opts': {'Reader': ascii.Csv}},
{'name': 'data/simple_csv_missing.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'skip': True,
'opts': {'Reader': ascii.Csv}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex1.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Facility', 'Id', 'exposure', 'date'),
'name': 'data/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 'data/fixed_width_2_line.txt',
'nrows': 2,
'opts': {'Reader': ascii.FixedWidthTwoLine}},
]
try:
import bs4 # noqa
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
'name': 'data/html.html',
'nrows': 3,
'opts': {'Reader': ascii.HTML}})
except ImportError:
pass
if name is not None:
# If there are multiple matches then return a list, else return just
# the one match.
out = [x for x in testfiles if x['name'] == name]
if len(out) == 1:
out = out[0]
else:
out = testfiles
return out
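# For example (illustrative only): get_testfiles('data/cds.dat') returns a list with
# the two matching entries (Cds and Mrt readers), get_testfiles('data/test4.dat')
# returns the single matching dict directly, and get_testfiles() returns everything.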
def test_header_start_exception():
'''Check that certain Readers throw an exception if ``header_start`` is set.
For certain Readers it does not make sense to set ``header_start``; they
throw an exception if you try.
This was implemented in response to issue #885.
'''
for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
ascii.BaseReader, ascii.FixedWidthNoHeader,
ascii.Cds, ascii.Mrt, ascii.Daophot]:
with pytest.raises(ValueError):
ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ['# a, b',
'1, 2',
'3, 4']
t = ascii.read(lines)
assert t.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
assert t.colnames == ['b', 'a']
def test_sextractor_units():
"""
Make sure that the SExtractor reader correctly inputs descriptions and units.
"""
table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False)
expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),
Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),
Unit('mag * arcsec**(-2)')]
expected_descrs = ['Running object number',
'Windowed position estimate along x',
'Windowed position estimate along y',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',
'Extraction flags',
None,
'Barycenter position along MAMA x axis',
'Peak surface brightness above background']
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False)
expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',
'MAG_AUTO', 'MAGERR_AUTO',
'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3',
'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',
'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3',
'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']
expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),
Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag')]
expected_descrs = ['Object position along x', None,
'Right ascension of barycenter (J2000)',
'Declination of barycenter (J2000)',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude', ] + [
'Fixed aperture magnitude vector'] * 7 + [
'RMS error vector for fixed aperture mag.'] * 7
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ['abc']
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
Check that the Csv reader does not ignore lines starting with the # comment
character, which is defined for most Basic readers.
"""
t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')
assert t.colnames == ['#a', 'b']
assert len(t) == 2
assert t['#a'][1] == '#3'
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3'])
assert t.colnames == ['a', 'b', 'c']
assert t.meta['comments'] == ['comment1', 'comment2']
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert "** To figure out why the table did not read, use guess=False and" in str(err.value)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert 'At least one header line beginning and ending with delimiter required' in str(err.value)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic',
quotechar='"', delimiter=' ', fast_reader=False)
assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)
@pytest.mark.xfail('not HAS_BZ2')
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
"""
with open('data/ipac.dat.bz2', 'rb') as fd:
t = ascii.read(fd)
assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype']
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,1.11,1',
'2, 2, 4.0 , ss '])
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',
'| r | rE | rea | real | D | do | dou | f | i | l | da| c |',
' 1 2 3 4 5 6 7 8 9 10 11 12 ']
dat = ascii.read(lines, format='ipac')
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == 'f'
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == 'i'
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ('U', 'S')
def test_almost_but_not_quite_daophot():
'''Regression test for #3319.
This table looks so close to a daophot table that the daophot reader gets
quite far before it fails with an AttributeError.
Note that this table will actually be read as a Commented Header table with
the columns ['some', 'header', 'info'].
'''
lines = ["# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9"]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize('fast', [False, 'force'])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected with header_start
at different positions, and that the table round-trips.
"""
comments = ['comment 1', 'comment 2', 'comment 3']
lines = ['# a b',
'# comment 1',
'# comment 2',
'# comment 3',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
out = StringIO()
ascii.write(dat, out, format='commented_header', fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines.insert(1, lines.pop(0))
dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(2, lines.pop(1))
dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(3, lines.pop(2))
dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines = ['# a b',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert 'comments' not in dat.meta
assert dat.colnames == ['a', 'b']
def test_probably_html():
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for tabl0 in ('data/html.html',
'http://blah.com/table.html',
'https://blah.com/table.html',
'file://blah/table.htm',
'ftp://blah.com/table.html',
'file://blah.com/table.htm',
' <! doctype html > hello world',
'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype html > ', ' hello world'),
):
assert _probably_html(tabl0) is True
for tabl0 in ('data/html.htms',
'Xhttp://blah.com/table.html',
' https://blah.com/table.htm',
'fole://blah/table.htm',
' < doctype html > hello world',
'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype htm > ', ' hello world'),
[[1, 2, 3]],
):
assert _probably_html(tabl0) is False
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_data_header_start(fast_reader):
tests = [(['# comment',
'',
' ',
'skip this line', # line 0
'a b', # line 1
'1 2'], # line 2
[{'header_start': 1},
{'header_start': 1, 'data_start': 2}
]
),
(['# comment',
'',
' \t',
'skip this line', # line 0
'a b', # line 1
'',
' \t',
'skip this line', # line 2
'1 2'], # line 3
[{'header_start': 1, 'data_start': 3}]),
(['# comment',
'',
' ',
'a b', # line 0
'',
' ',
'skip this line', # line 1
'1 2'], # line 2
[{'header_start': 0, 'data_start': 2},
{'data_start': 2}])]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(lines, format='basic', fast_reader=fast_reader,
guess=True, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 1
assert np.all(t['a'] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format='basic')
assert 'No header line found' in str(err.value)
table = BytesIO()
t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
assert not t and t.as_array().size == 0
# Put a single line of column names but with no newline
for kwargs in [dict(),
dict(guess=False, fast_reader=False, format='basic'),
dict(guess=False, fast_reader=True, format='fast_basic')]:
table = BytesIO()
table.write(b'a b')
t = ascii.read(table, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 0
def test_path_object():
fpath = pathlib.Path('data/simple.txt')
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']
assert data['test2'][1] == 'hat2'
def test_column_conversion_error():
"""
Test that context information (upstream exception message) from column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format='ipac')
assert 'Column col0 failed to convert:' in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})
assert 'no converters' in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'fr_FR')
else:
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
for fast_reader in (True,
False,
{'use_fast_converter': False},
{'use_fast_converter': True}):
t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,
fast_reader=fast_reader)
assert t['a'].dtype.kind == 'f'
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
def test_no_units_for_char_columns():
'''Test that a char column of a Table is assigned no unit and not
a dimensionless unit.'''
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {'--': '0'}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert reader.read("""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """)['a'][0] is np.ma.masked
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format='latex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
def test_aastex_no_trailing_backslash():
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format='aastex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
@pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252'])
def test_read_with_encoding(tmpdir, encoding):
data = {
'commented_header': '# à b è \n 1 2 héllo',
'csv': 'à,b,è\n1,2,héllo'
}
testfile = str(tmpdir.join('test.txt'))
for fmt, content in data.items():
with open(testfile, 'w', encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
for guess in (True, False):
table = ascii.read(testfile, format=fmt, fast_reader=False,
encoding=encoding, guess=guess)
assert table['è'].dtype.kind == 'U'
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
def test_unsupported_read_with_encoding(tmpdir):
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read('data/simple3.txt', guess=False, fast_reader='force',
encoding='latin1', format='fast_csv')
def test_read_chunks_input_types():
"""
Test chunked reading for different input types: file path, file object,
and string input.
"""
fpath = 'data/test5.dat'
t1 = ascii.read(fpath, header_start=1, data_start=3)
with open(fpath, 'r') as fd1, open(fpath, 'r') as fd2:
for fp in (fpath, fd1, fd2.read()):
t_gen = ascii.read(fp, header_start=1, data_start=3,
guess=False, format='fast_basic',
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) == 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
with open(fpath, 'r') as fd1, open(fpath, 'r') as fd2:
for fp in (fpath, fd1, fd2.read()):
# Now read the full table in chunks
t3 = ascii.read(fp, header_start=1, data_start=3,
fast_reader={'chunk_size': 300})
assert np.all(t1 == t3)
@pytest.mark.parametrize('masked', [True, False])
def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, f'col{i + 1}')
# TODO: commented_header does not currently work due to the special-cased
# implementation of header parsing.
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(out.getvalue(), format=format,
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400})
assert np.all(t1 == t3)
def test_read_chunks_chunk_size_too_small():
fpath = 'data/test5.dat'
with pytest.raises(ValueError) as err:
ascii.read(fpath, header_start=1, data_start=3,
fast_reader={'chunk_size': 10})
assert 'no newline found in chunk (chunk_size too small?)' in str(err.value)
def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50
table = '\n'.join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={'chunk_size': 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2)
def test_read_non_ascii():
"""Test that pure-Python reader is used in case the file contains non-ASCII characters
in it.
"""
table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv')
assert np.all(table['col1'] == ['\u2119', '1'])
assert np.all(table['col2'] == ['\u01b4', '2'])
@pytest.mark.parametrize('enable', [True, False, 'force'])
def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence.
"""
# Fails for enable=(True, 'force') - #5578
ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable))
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Tab if (enable is False) else ascii.FastTab)
for k in get_read_trace():
if not k.get('status', 'Disabled').startswith('Disabled'):
assert k.get('kwargs').get('fast_reader').get('enable') is enable
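# Helper for the duplicate-column-name tests below: builds a small table whose
# header repeats the name 'a'; with rdb=True a type line is added and the fields
# are tab-separated, as the RDB format requires.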
def _get_lines(rdb):
lines = ['a a_2 a_1 a a']
if rdb:
lines += ['N N N N N']
lines += ['1 2 3 4 5', '10 20 30 40 50']
if rdb:
lines = ['\t'.join(line.split()) for line in lines]
return lines
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_basic(rdb, fast_reader):
"""Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below.
"""
lines = _get_lines(rdb)
dat = ascii.read(lines, fast_reader=fast_reader)
assert dat.colnames == ['a', 'a_2', 'a_1', 'a_3', 'a_4']
assert len(dat) == 2
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat.colnames == ['a', 'a_2', 'a_3']
assert np.all(dat['a'] == [1, 10])
assert np.all(dat['a_2'] == [2, 20])
assert np.all(dat['a_3'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['b1', 'b2', 'a_4', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
exclude_names=['b3', 'b5', 'a_3', 'a_4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
def test_include_names_rdb_fast():
"""Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939.
"""
lines = _get_lines(True)
lines[0] = 'a\ta_2\ta_1\ta_3\ta_4'
dat = ascii.read(lines, fast_reader='force', include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a'].dtype == int
assert dat['a_2'].dtype == int
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_with_types(fast_reader):
"""Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved.
"""
lines = _get_lines(True)
lines[1] = 'N\tN\tN\tS\tS'
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a_2'].dtype.kind == 'i'
assert dat['a_3'].dtype.kind == 'U'
dat = ascii.read(lines, fast_reader=fast_reader, names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['a1', 'a_2', 'b1', 'b2', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert dat['b2'].dtype.kind == 'i'
assert dat['b4'].dtype.kind == 'U'
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_set_invalid_names(rdb, fast_reader):
"""Test exceptions for invalid (duplicate or `None`) names specified via argument."""
lines = _get_lines(rdb)
if rdb:
fmt = 'rdb'
else:
fmt = 'basic'
with pytest.raises(ValueError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', 'b4', 'b5'])
assert 'Duplicate column names' in str(err.value)
with pytest.raises(TypeError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', None, None])
assert 'Cannot have None for column name' in str(err.value)
def test_read_masked_bool():
txt = """\
col0 col1
1 1
0 2
True 3
"" 4
False 5
"""
# Reading without converters returns col0 as a string
dat = ascii.read(txt, format='basic')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'U'
assert col[0] == "1"
# Force col0 to be read as bool
converters = {'col0': [convert_numpy(bool)]}
dat = ascii.read(txt, format='basic', converters=converters)
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'b'
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
def test_read_converters_wildcard():
'''Test converters where the column name is specified with
a wildcard.
'''
converters = {'F*': [ascii.convert_numpy(np.float32)]}
t = ascii.read(['Fabc Iabc', '1 2'], converters=converters)
assert np.issubdtype(t['Fabc'].dtype, np.float32)
assert not np.issubdtype(t['Iabc'].dtype, np.float32)
|
b313588ce54f742f8b56c7f1f57e3acff94ba0f3fce4e0bdb606d58cbfe83496 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
from contextlib import nullcontext
from io import StringIO
from itertools import chain
import pytest
import numpy as np
from astropy.io import ascii
from astropy import table
from astropy.table.table_helpers import simple_table
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.compat.optional_deps import HAS_BS4
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy import units as u
from .common import setup_function, teardown_function # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound # noqa
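# Each entry pairs a set of ascii.write() keyword arguments with the exact text
# expected on output; the write tests below iterate over these definitions.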
test_defs = [
dict(kwargs=dict(),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(delimiter=None),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(formats={'XCENTER': '%12.1f',
'YCENTER': '{0:.1f}'},
include_names=['XCENTER', 'YCENTER'],
strip_whitespace=False),
out="""\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
"""
),
dict(kwargs=dict(Writer=ascii.Rdb, exclude_names=['CHI']),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Tab),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
"""
),
dict(kwargs=dict(Writer=ascii.NoHeader),
out="""\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader),
out="""\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader, comment='&'),
out="""\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.Latex),
out="""\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
"""
),
dict(kwargs=dict(Writer=ascii.AASTex),
out="""\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.AASTex, caption='Mag values \\label{tab1}', latexdict={
'units': {'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'deluxetable*',
'tablealign': 'htpb'}),
out="""\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.Latex, caption='Mag values \\label{tab1}',
latexdict={'preamble': '\\begin{center}', 'tablefoot': '\\end{center}',
'data_end': ['\\hline', '\\hline'],
'units':{'MAG': '[mag]', 'XCENTER': '[pixel]'},
'tabletype': 'table*',
'tablealign': 'h'},
col_align='|lcccccccccc|'),
out="""\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts['template']),
out="""\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict={'tabletype': None}),
out="""\
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
"""
),
dict(kwargs=dict(Writer=ascii.HTML, htmldict={'css': 'table,th,td{border:1px solid black;'}),
out="""\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
"""
),
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""" # noqa
),
]
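# Expected output when writing an IPAC table that has a header (and keywords) but
# no data rows.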
test_defs_no_data = [
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
"""
),
]
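# Small three-column input table used by the fill_values write tests.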
tab_to_fill = ['a b c', '1 2 3', '1 1 3']
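# Expected outputs for various combinations of fill_values, fill_include_names and
# fill_exclude_names applied to the (unmasked) table above.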
test_defs_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w')),
out="""\
a b c
w 2 3
w w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w', 'b')),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['b']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_exclude_names=['a']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['a'],
fill_exclude_names=['a', 'b']),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=[('1', 'w')],
formats={'a': '%4.2f'}),
out="""\
a b c
1.00 2 3
1.00 w 3
"""
),
]
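# Expected outputs for the same table with masked entries; ascii.masked can be used
# as a fill_values key to control how masked values are written.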
test_def_masked_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
"" 2 3
1 1 ""
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'X')]),
out="""\
a b c
X 2 3
w w X
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'XXX')],
formats={'a': '%4.1f'}),
out="""\
a b c
XXX 2 3
1.0 w XXX
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
a,b,c
,2,3
1,1,
"""
),
]
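# Write ``table`` with the given kwargs and compare the result to the expected text,
# line by line, ignoring leading/trailing whitespace on each line.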
def check_write_table(test_def, table, fast_writer):
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
if 'not in the list of formats with fast writers' not in str(e):
raise e
return
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{out.getvalue()}')
assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
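# Same check as above, but going through the Table.write() interface (format string)
# instead of calling ascii.write() directly.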
def check_write_table_via_table(test_def, table, fast_writer):
out = StringIO()
test_def = copy.deepcopy(test_def)
if 'Writer' in test_def['kwargs']:
format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
del test_def['kwargs']['Writer']
else:
format = 'ascii'
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
if 'not in the list of formats with fast writers' not in str(e):
raise e
return
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{out.getvalue()}')
assert [x.strip() for x in out.getvalue().strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_table(fast_writer):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read('data/daophot.dat')
for test_def in test_defs:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
'''see discussion in #2255'''
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['blah'] = 'extra'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*were not written.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = 'invalid'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*has been skipped.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = {'value': 'invalid'}
out = StringIO()
data.write(out, format='ascii.ipac')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1,2,3')
out = StringIO()
ascii.write(data, out, format='basic', fast_writer=fast_writer)
expected = ['# c1', '# c2', '# c3', 'a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format='commented_header', fast_writer=fast_writer)
expected = ['# a b c', '# c1', '# c2', '# c3', '1 2 3']
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format='basic', comment=False, fast_writer=fast_writer)
expected = ['a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}'])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33')
out = StringIO()
expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33']
data['a'].format = fmt
ascii.write(data, out, format='basic', fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(' A', 'B ', ' C '))
out = StringIO()
ascii.write(data, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == 'A,B,C'
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table([table.Column(name='date', data=['a', 'b']),
table.Column(name='NUV exp.time', data=[1, 2])])
latexdict = copy.deepcopy(ascii.latexdicts['AA'])
latexdict['units'] = {'NUV exp.time': 's'}
out = StringIO()
expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)
ascii.write(t, out, format='aastex', latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t['NUV exp.time'].unit = u.s
t['date'].unit = u.yr
out = StringIO()
ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
assert out.getvalue() == expected.replace(
'colhead{s}', r'colhead{$\mathrm{s}$}').replace(
'colhead{ }', r'colhead{$\mathrm{yr}$}')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
Test the fix for #3562 with confusing exception using comment=False
for the commented_header writer.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(t, out, format='commented_header', comment=False,
fast_writer=fast_writer)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', 'World']], dtype=['S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0', 'Hello', 'World']
@pytest.mark.parametrize('names, include_names, exclude_names, formats, issues_warning', [
(['x', 'y'], ['x', 'y'], ['x'], {'x': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], ['y'], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'q': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'z': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {}, False)
])
def test_names_with_formats(names, include_names, exclude_names, formats, issues_warning):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, names=names, include_names=include_names,
exclude_names=exclude_names, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize('formats, issues_warning', [
({'p': '%d', 'y': '%f'}, True),
({'x': '%d', 'y': '%f'}, True),
({'z': '%f'}, True),
({}, False)
])
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=',')
assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ',']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=['S2'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0']
@pytest.mark.parametrize("format", ['ascii', 'csv', 'html', 'latex',
'ascii.fixed_width', 'html'])
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_overwrite_ascii(format, fast_writer, tmpdir):
"""Test overwrite argument for various ASCII writers"""
filename = tmpdir.join("table-tmp.dat").strpath
with open(filename, 'w'):
# create empty file
pass
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format,
fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(filename, 'w') as fp:
t.write(fp, overwrite=False, format=format,
fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format,
fast_writer=fast_writer)
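# All registered (format name, reader/writer class) pairs, fast classes included,
# used to parametrize the roundtrip and multidim-column tests below.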
fmt_name_classes = list(chain(ascii.core.FAST_CLASSES.items(),
ascii.core.FORMAT_CLASSES.items()))
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for fixed_width or HTML without bs4
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name == 'fixed_width'):
return
if 'qdp' in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=['f', 'i'])
else:
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {}
if 'qdp' in fmt_name:
kwargs.update({'table_id': 0, 'names': t.colnames})
t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_newlines(fast_writer, tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/5126
# On windows, when writing to a filename (not e.g. StringIO), newlines were
# \r\r\n instead of \r\n.
filename = tmpdir.join('test').strpath
t = table.Table([['a', 'b', 'c']], names=['col'])
ascii.write(t, filename, fast_writer=fast_writer)
with open(filename, 'r', newline='') as f:
content = f.read()
assert content == os.linesep.join(['col', 'a', 'b', 'c']) + os.linesep
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=['a', 'b'])
t.meta['comments'] = ['hello']
ascii.write(t, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['a,b', '1,3', '2,4']
out = StringIO()
ascii.write(t, out, format='csv', fast_writer=fast_writer, comment='#')
assert out.getvalue().splitlines() == ['#hello', 'a,b', '1,3', '2,4']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=['a', 'b'])
ascii.write(t, out, fast_writer=fast_writer, formats={'a': '%02d', 'b': '%.2f'})
assert out.getvalue().splitlines() == ['a b',
'01 1.00',
'02 2.00']
def test_validate_write_kwargs():
out = StringIO()
t = table.QTable([[1, 2], [1, 2]], names=['a', 'b'])
with pytest.raises(TypeError, match=r"write\(\) argument 'fast_writer' must be a "
r"\(<class 'bool'>, <class 'str'>\) object, "
r"got <class 'int'> instead"):
ascii.write(t, out, fast_writer=12)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for ecsv or HTML without bs4. See the comment in latex.py
# Latex class where max_ndim = None is defined regarding latex and aastex.
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name in ('ecsv', 'latex', 'aastex')):
return
out = StringIO()
t = table.Table()
t['a'] = np.arange(16).reshape(2, 2, 2, 2)
t['b'] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r'column\(s\) with dimension'):
ascii.write(t, out, format=fmt_name, fast_writer=fast)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
|
78bb03956d59ec4f4f00a259595429920e36c554ba92ba419dc3eb5b0c167e84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.table import Table, Column
from astropy.table.table_helpers import simple_table
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.compat.optional_deps import HAS_BS4 # noqa
import numpy as np
files = ['data/cds.dat', 'data/ipac.dat', 'data/daophot.dat', 'data/latex1.tex',
'data/simple_csv.csv']
if HAS_BS4:
files.append('data/html.html')
@pytest.mark.parametrize('filename', files)
def test_read_generic(filename):
Table.read(get_pkg_data_filename(filename), format='ascii')
def test_write_generic(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
t.write(str(tmpdir.join("test")), format='ascii')
def test_read_ipac():
Table.read(get_pkg_data_filename('data/ipac.dat'), format='ipac')
def test_read_cds():
Table.read(get_pkg_data_filename('data/cds.dat'), format='cds')
def test_read_dapphot():
Table.read(get_pkg_data_filename('data/daophot.dat'), format='daophot')
def test_read_latex():
Table.read(get_pkg_data_filename('data/latex1.tex'), format='latex')
def test_read_latex_noformat():
Table.read(get_pkg_data_filename('data/latex1.tex'))
def test_write_latex(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.tex"))
t.write(path, format='latex')
def test_write_latex_noformat(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.tex"))
t.write(path)
@pytest.mark.skipif('not HAS_BS4')
def test_read_html():
Table.read(get_pkg_data_filename('data/html.html'), format='html')
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_noformat():
Table.read(get_pkg_data_filename('data/html.html'))
def test_write_html(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.html"))
t.write(path, format='html')
def test_write_html_noformat(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.html"))
t.write(path)
def test_read_rdb():
Table.read(get_pkg_data_filename('data/short.rdb'), format='rdb')
def test_read_rdb_noformat():
Table.read(get_pkg_data_filename('data/short.rdb'))
def test_write_rdb(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.rdb"))
t.write(path, format='rdb')
def test_write_rdb_noformat(tmpdir):
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.rdb"))
t.write(path)
def test_read_csv():
'''If properly registered, filename should be sufficient to specify format
#3189
'''
Table.read(get_pkg_data_filename('data/simple_csv.csv'))
def test_write_csv(tmpdir):
'''If properly registered, filename should be sufficient to specify format
#3189
'''
t = Table()
t.add_column(Column(name='a', data=[1, 2, 3]))
t.add_column(Column(name='b', data=['a', 'b', 'c']))
path = str(tmpdir.join("data.csv"))
t.write(path)
def test_auto_identify_ecsv(tmpdir):
tbl = simple_table()
tmpfile = str(tmpdir.join('/tmpFile.ecsv'))
tbl.write(tmpfile)
tbl2 = Table.read(tmpfile)
assert np.all(tbl == tbl2)
|
7bba2560a7e1ba1fc2d488cbebd9e17fec9302cd19440c87e3150a2adb3c25a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io import ascii
from .common import (assert_equal, assert_almost_equal, # noqa
setup_function, teardown_function) # noqa
from astropy import units as u
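# Three equivalent ways of reading a CDS table: instantiating ascii.Cds directly,
# going through ascii.get_reader(), and using the high-level ascii.read() call.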
def read_table1(readme, data):
reader = ascii.Cds(readme)
return reader.read(data)
def read_table2(readme, data):
reader = ascii.get_reader(Reader=ascii.Cds, readme=readme)
reader.outputter = ascii.TableOutputter()
return reader.read(data)
def read_table3(readme, data):
return ascii.read(data, readme=readme)
def test_description():
readme = 'data/cds/description/ReadMe'
data = 'data/cds/description/table.dat'
for read_table in (read_table1, read_table2, read_table3):
table = read_table(readme, data)
assert_equal(len(table), 2)
assert_equal(table['Cluster'].description, 'Cluster name')
assert_equal(table['Star'].description, '')
assert_equal(table['Wave'].description, 'wave? Wavelength in Angstroms')
assert_equal(table['El'].description, 'a')
assert_equal(table['ion'].description, '- Ionization stage (1 for neutral element)')
assert_equal(table['EW'].description, 'Equivalent width (in mA)')
assert_equal(table['Q'].description, 'DAOSPEC quality parameter Q(large values are bad)')
def test_multi_header():
readme = 'data/cds/multi/ReadMe'
data = 'data/cds/multi/lhs2065.dat'
for read_table in (read_table1, read_table2, read_table3):
table = read_table(readme, data)
assert_equal(len(table), 18)
assert_almost_equal(table['Lambda'][-1], 6479.32)
assert_equal(table['Fnu'][-1], '0.285937')
data = 'data/cds/multi/lp944-20.dat'
for read_table in (read_table1, read_table2, read_table3):
table = read_table(readme, data)
assert_equal(len(table), 18)
assert_almost_equal(table['Lambda'][0], 6476.09)
assert_equal(table['Fnu'][-1], '0.489005')
def test_glob_header():
readme = 'data/cds/glob/ReadMe'
data = 'data/cds/glob/lmxbrefs.dat'
for read_table in (read_table1, read_table2, read_table3):
table = read_table(readme, data)
assert_equal(len(table), 291)
assert_equal(table['Name'][-1], 'J1914+0953')
assert_equal(table['BibCode'][-2], '2005A&A...432..235R')
def test_header_from_readme():
r = ascii.Cds("data/vizier/ReadMe")
table = r.read("data/vizier/table1.dat")
assert len(r.data.data_lines) == 15
assert len(table) == 15
assert len(table.keys()) == 18
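# Expected values of the Bmag column, in row order.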
Bmag = [14.79,
15.00,
14.80,
12.38,
12.36,
12.24,
13.75,
13.65,
13.41,
11.59,
11.68,
11.53,
13.92,
14.03,
14.18]
for i, val in enumerate(table.field('Bmag')):
assert val == Bmag[i]
table = r.read("data/vizier/table5.dat")
assert len(r.data.data_lines) == 49
assert len(table) == 49
assert len(table.keys()) == 10
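# Expected values of the Q column; -9.999 is the text value used for masked entries.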
Q = [0.289,
0.325,
0.510,
0.577,
0.539,
0.390,
0.957,
0.736,
1.435,
1.117,
1.473,
0.808,
1.416,
2.209,
0.617,
1.046,
1.604,
1.419,
1.431,
1.183,
1.210,
1.005,
0.706,
0.665,
0.340,
0.323,
0.391,
0.280,
0.343,
0.369,
0.495,
0.828,
1.113,
0.499,
1.038,
0.260,
0.863,
1.638,
0.479,
0.232,
0.627,
0.671,
0.371,
0.851,
0.607,
-9.999,
1.958,
1.416,
0.949]
for i, val in enumerate(table.field('Q')):
if val is np.ma.masked:
# text value for a missing value in that table
assert Q[i] == -9.999
else:
assert val == Q[i]
@pytest.mark.parametrize('reader_cls', (ascii.Cds, ascii.Mrt))
def test_cds_units(reader_cls):
from astropy import units
data_and_readme = 'data/cds.dat'
reader = ascii.get_reader(reader_cls)
table = reader.read(data_and_readme)
# column unit is GMsun (giga solar masses)
# make sure this is parsed correctly, not as a "string" unit
assert table['Fit'].to(units.solMass).unit == units.solMass
@pytest.mark.parametrize('reader_cls', (ascii.Cds, ascii.Mrt))
def test_cds_function_units(reader_cls):
data_and_readme = 'data/cdsFunctional.dat'
reader = ascii.get_reader(reader_cls)
table = reader.read(data_and_readme)
assert table['logg'].unit == u.dex(u.cm/u.s**2)
assert table['logTe'].unit == u.dex(u.K)
assert table['Mass'].unit == u.Msun
assert table['e_Mass'].unit == u.Msun
assert table['Age'].unit == u.Myr
assert table['e_Age'].unit == u.Myr
@pytest.mark.parametrize('reader_cls', (ascii.Cds, ascii.Mrt))
def test_cds_function_units2(reader_cls):
# This one includes some dimensionless dex.
data_and_readme = 'data/cdsFunctional2.dat'
reader = ascii.get_reader(reader_cls)
table = reader.read(data_and_readme)
assert table['Teff'].unit == u.K
assert table['logg'].unit == u.dex(u.cm/u.s**2)
assert table['vturb'].unit == u.km/u.s
assert table['[Fe/H]'].unit == u.dex(u.one)
assert table['e_[Fe/H]'].unit == u.dex(u.one)
assert_almost_equal(table['[Fe/H]'].to(u.one),
10.**(np.array([-2.07, -1.50, -2.11, -1.64])))
def test_cds_ignore_nullable():
# Make sure CDS Reader does not ignore nullabilty for columns
# with a limit specifier
readme = 'data/cds/null/ReadMe'
data = 'data/cds/null/table.dat'
r = ascii.Cds(readme)
r.read(data)
assert_equal(r.header.cols[6].description, 'Temperature class codified (10)')
assert_equal(r.header.cols[8].description, 'Luminosity class codified (11)')
assert_equal(r.header.cols[5].description, 'Pericenter position angle (18)')
def test_cds_no_whitespace():
# Make sure CDS Reader only checks null values when an '=' symbol is present,
# and read description text even if there is no whitespace after '?'.
readme = 'data/cds/null/ReadMe1'
data = 'data/cds/null/table.dat'
r = ascii.Cds(readme)
r.read(data)
assert_equal(r.header.cols[6].description, 'Temperature class codified (10)')
assert_equal(r.header.cols[6].null, '')
assert_equal(r.header.cols[7].description, 'Equivalent width (in mA)')
assert_equal(r.header.cols[7].null, '-9.9')
assert_equal(r.header.cols[10].description,
'DAOSPEC quality parameter Q(large values are bad)')
assert_equal(r.header.cols[10].null, '-9.999')
def test_cds_order():
# Make sure CDS Reader does not ignore order specifier that maybe present after
# the null specifier '?'
readme = 'data/cds/null/ReadMe1'
data = 'data/cds/null/table.dat'
r = ascii.Cds(readme)
r.read(data)
assert_equal(r.header.cols[5].description, 'Catalogue Identification Number')
assert_equal(r.header.cols[8].description, 'Equivalent width (in mA)')
assert_equal(r.header.cols[9].description, 'Luminosity class codified (11)')
if __name__ == "__main__": # run from main directory; not from test/
test_header_from_readme()
test_multi_header()
test_glob_header()
test_description()
test_cds_units(ascii.Cds)
test_cds_ignore_nullable()
test_cds_no_whitespace()
test_cds_order()
|