Commit

Merge sherpa#1926 (DougBurke) - Let users know they should use load_xstable_model not load_table_model

Let users know they should use load_xstable_model not load_table_model
wmclaugh authored Jan 8, 2024
2 parents 90df9a7 + dfc4d4a commit 45f76ae
Showing 8 changed files with 258 additions and 149 deletions.
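
For orientation, here is a minimal sketch of the behaviour this commit documents, based on the calls exercised in the new tests further down (the model name and file path are illustrative):

from sherpa.astro import ui

# Deprecated route: load_table_model can still read an XSPEC table model,
# but after this change it emits a DeprecationWarning and logs
# "Use load_xstable_model to load XSPEC table models".
ui.load_table_model("tmod", "xspec-tablemodel-RCS.mod")

# Preferred route: load_xstable_model reads the same file with no warning.
ui.load_xstable_model("tmod", "xspec-tablemodel-RCS.mod")

tmod = ui.get_model_component("tmod")
print(tmod.name)  # "xstablemodel.tmod"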
61 changes: 29 additions & 32 deletions sherpa/astro/io/__init__.py
@@ -54,7 +54,6 @@
import logging
import os
import re
import sys
from typing import Any, Optional

import numpy
@@ -501,9 +500,9 @@ def _read_ancillary(data, key, label, dname,
if output_once:
info(f'read {label} file {data[key]}')

except Exception:
except Exception as exc:
if output_once:
warning(str(sys.exc_info()[1]))
warning(str(exc))

return out

@@ -544,16 +543,17 @@ def read_pha(arg, use_errors=False, use_background=False):
elif data['syserror'] is None:
msg = 'statistical'
if output_once:
wmsg = "systematic errors were not found in " + \
f"file '{filename}'"
warning(wmsg)
warning("systematic errors were not found in "
"file '%s'", filename)

else:
msg = 'statistical and systematic'

if output_once:
imsg = msg + " errors were found in file " + \
f"'{filename}' \nbut not used; " + \
"to use them, re-read with use_errors=True"
info(imsg)
info("%s errors were found in file '%s'\n"
"but not used; to use them, re-read "
"with use_errors=True", msg, filename)

data['staterror'] = None
data['syserror'] = None

@@ -581,7 +581,6 @@ def read_pha(arg, use_errors=False, use_background=False):
# Do not read backgrounds of backgrounds
if not use_background:
bkg_datasets = read_pha(data['backfile'], use_errors, True)

if output_once:
info(f"read background file {data['backfile']}")

@@ -597,9 +596,9 @@
bkg_datasets.set_response(arf, rmf)
backgrounds.append(bkg_datasets)

except Exception:
except Exception as exc:
if output_once:
warning(str(sys.exc_info()[1]))
warning(str(exc))

for bkg_type, bscal_type in zip(('background_up', 'background_down'),
('backscup', 'backscdn')):
@@ -693,22 +692,6 @@ def _pack_image(dataset):
return data, header


def _set_keyword(header, label, value):
"""Extract the string form of the value, and store
the last path element in the header using the label
key.
"""

if value is None:
return

name = getattr(value, 'name', 'none')
if name is not None and name.find('/') != -1:
name = name.split('/')[-1]

header[label] = name


# This is to match NAXIS1 and the like, but we do not make it
# very prescriptive (ie enforce the FITS keyword standards).
#
@@ -926,9 +909,23 @@ def _pack_pha(dataset):
if dataset.exposure is not None:
header["EXPOSURE"] = dataset.exposure

_set_keyword(header, "RESPFILE", rmf)
_set_keyword(header, "ANCRFILE", arf)
_set_keyword(header, "BACKFILE", bkg)
def _set_keyword(label: str, value: Optional[Data1D]) -> None:
"""Do we set the *FILE keyword?"""

if value is None or value.name is None:
return

# Split on / and then take the last element. Should this
# instead make the path relative to the file name for the PHA
# dataset (but the problem is that this information is not
# known here)?
#
toks = value.name.split("/")
header[label] = toks[-1]

_set_keyword("RESPFILE", rmf)
_set_keyword("ANCRFILE", arf)
_set_keyword("BACKFILE", bkg)

# The column ordering for the output file is determined by the
# order the keys are added to the data dict.
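
For illustration, a tiny standalone sketch of what the new nested _set_keyword helper does (keep only the final path element of the response or background name), using a hypothetical path and a stand-in object:

header = {}

class FakeResponse:
    # Stand-in for an ARF/RMF/background dataset with a .name attribute.
    name = "/data/obs123/acis_repro.rmf"   # hypothetical path

def _set_keyword(label, value):
    # Mirrors the helper above: skip missing values, store only the basename.
    if value is None or value.name is None:
        return
    header[label] = value.name.split("/")[-1]

_set_keyword("RESPFILE", FakeResponse())
_set_keyword("ANCRFILE", None)   # nothing stored for a missing component

print(header)   # {'RESPFILE': 'acis_repro.rmf'}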
41 changes: 39 additions & 2 deletions sherpa/astro/io/tests/test_io.py
@@ -19,6 +19,7 @@
#

import struct
import warnings

import numpy as np

@@ -36,12 +37,48 @@
@requires_data
@requires_fits
@requires_xspec
def test_mod_fits(make_data_path):
def test_mod_fits(make_data_path, clean_astro_ui, caplog):
"""Can we read in an XSPEC table model with load_table_model.
This approach is deprecated. Use load_xstable_model instead.
"""

tablemodelfile = make_data_path("xspec-tablemodel-RCS.mod")
with warnings.catch_warnings(record=True) as warn:
ui.load_table_model("tmod", tablemodelfile)

msg = "Use load_xstable_model to load XSPEC table models"
assert len(warn) == 1
assert warn[0].category == DeprecationWarning
assert str(warn[0].message) == msg

tmod = ui.get_model_component("tmod")
assert tmod.name == "xstablemodel.tmod"

assert len(caplog.records) == 1
assert caplog.records[0].name == "sherpa.astro.ui.utils"
assert caplog.records[0].levelname == "WARNING"
assert caplog.records[0].getMessage() == msg


@requires_data
@requires_fits
@requires_xspec
def test_xsmod_fits(make_data_path, clean_astro_ui, caplog):
"""Can we read in an XSPEC table model with load_xstable_model."""

tablemodelfile = make_data_path("xspec-tablemodel-RCS.mod")
ui.load_table_model("tmod", tablemodelfile)
with warnings.catch_warnings(record=True) as warn:
ui.load_xstable_model("tmod", tablemodelfile)

assert len(warn) == 0

tmod = ui.get_model_component("tmod")
assert tmod.name == "xstablemodel.tmod"

# just check the log output is empty
assert len(caplog.records) == 0


@requires_fits
@pytest.mark.parametrize("flag", [True, False])
72 changes: 72 additions & 0 deletions sherpa/astro/io/tests/test_io_pha.py
@@ -858,6 +858,78 @@ def test_write_pha_fits_with_extras_roundtrip(tmp_path, caplog):
raise RuntimeError(f"Unknown io backend: {io.backend}")


@requires_fits
def test_pha_missing_backfile(tmp_path, caplog):
"""What happens if BACKFILE does not exist.
This goes through a slightly-different path to the missing
ANCRFILE case tested in test_write_pha_fits_with_extras_roundtrip.
"""

chans = np.arange(1, 5, dtype=np.int32)
counts = np.asarray([1, 0, 3, 2], dtype=np.int32)
etime = 1023.4
bscal = 0.05

hdr = {"TELESCOP": "CHANDRA", "INSTRUME": "ACIS", "FILTER": "NONE",
"CHANTYPE": "PI",
"DETCHANS": 5,
"OBJECT": "Made up source",
"CORRFILE": "None",
"BACKFILE": "made-up-backfile.fits",
# "structural keywords" which match DETCHANS
"TLMIN1": 1, "TLMAX1": 5}

pha = DataPHA("testy",
chans.astype(np.float64),
counts.astype(np.float32),
exposure=etime,
backscal=bscal,
header=hdr)

outfile = tmp_path / "out.pi"
io.write_pha(str(outfile), pha, ascii=False, clobber=False)
pha = None

assert len(caplog.record_tuples) == 0

with SherpaVerbosity("INFO"):
inpha = io.read_pha(str(outfile))

assert len(caplog.record_tuples) == 1
lname, lvl, msg = caplog.record_tuples[0]
assert lname == "sherpa.astro.io"
assert lvl == logging.WARNING

# message depends on the backend
if backend_is("crates"):
assert msg.startswith("File ")
assert msg.endswith("/made-up-backfile.fits does not exist.")
elif backend_is("pyfits"):
assert msg.startswith("file '")
assert msg.endswith("/made-up-backfile.fits' not found")

assert inpha.channel == pytest.approx(chans)
assert inpha.counts == pytest.approx(counts)

assert inpha.response_ids == []
assert inpha.background_ids == []

# Checks related to issue #1885 (so this is partly a regression
# test).
#
for key in ["ANCRFILE", "BACKFILE", "RESPFILE"]:
assert key not in inpha.header

assert inpha.header["CORRFILE"] == "None"

# DJB is interested to see when this gets set, so treat this
# as a regression test.
#
assert inpha.header["SYS_ERR"] == 0


@requires_fits
@requires_data
def test_chandra_phaII_roundtrip(make_data_path, tmp_path):
10 changes: 9 additions & 1 deletion sherpa/astro/io/tests/test_io_response.py
@@ -422,7 +422,7 @@ def test_write_rmf_fits_chandra_acis(make_data_path, tmp_path):

@requires_data
@requires_fits
def test_write_rmf_fits_asca_sis(make_data_path, tmp_path):
def test_write_rmf_fits_asca_sis(make_data_path, tmp_path, caplog):
"""Check we can write out an RMF as a FITS file.
Use the ASCA SIS as it has a different structure to
@@ -437,7 +437,15 @@ def test_write_rmf_fits_asca_sis(make_data_path, tmp_path):
NUMELT = 505483

infile = make_data_path("sis0.rmf")
assert len(caplog.record_tuples) == 0
orig = io.read_rmf(infile)
assert len(caplog.record_tuples) == 1

lname, lvl, msg = caplog.record_tuples[0]
assert lname == io.backend.__name__
assert lvl == logging.ERROR
assert msg.startswith("Failed to locate TLMIN keyword for F_CHAN column in RMF file '")
assert msg.endswith("sis0.rmf'; Update the offset value in the RMF data set to appropriate TLMIN value prior to fitting")

# Unlike the ACIS case, this does not have NUMELT/GRP and
# leave in the HDU keys to check they don't get over-written.
18 changes: 7 additions & 11 deletions sherpa/astro/ui/tests/test_astro_supports_pha2.py
@@ -1,5 +1,5 @@
#
# Copyright (C) 2017, 2018, 2020, 2021
# Copyright (C) 2017, 2018, 2020, 2021, 2023
# Smithsonian Astrophysical Observatory
#
#
@@ -165,20 +165,16 @@ def test_load_pha2(loader, id0, ids, make_data_path, caplog, clean_astro_ui):
assert b.name == infile

# Test Log messages
msg_one = "systematic errors were not found in file '{}'".format(infile)
msg_one = f"systematic errors were not found in file '{infile}'"

# Editors can remove trailing spaces from lines, so split into
# separate lines so the space after the file name is included.
# Perhaps this space should be removed from the warning message?
#
msg_two = "statistical errors were found in file '{}' \n".format(infile) + \
msg_two = f"statistical errors were found in file '{infile}'\n" + \
"but not used; to use them, re-read with use_errors=True"

msg_three = "read background_up into a dataset from file {}".format(infile)
msg_four = "read background_down into a dataset from file {}".format(infile)
msg_three = f"read background_up into a dataset from file {infile}"
msg_four = f"read background_down into a dataset from file {infile}"

msg_five = "Multiple data sets have been input: " + \
"{}-{}".format(ids[0], ids[11])
f"{ids[0]}-{ids[11]}"

assert caplog.record_tuples == [
('sherpa.astro.io', logging.WARNING, msg_one),
@@ -212,7 +208,7 @@ def test_load_pha2_compare_meg_order1(make_data_path, clean_astro_ui):
ui.load_pha(pha2file)

for n, lbl in zip([9, 10], ["-1", "1"]):
h = '3c120_meg_{}'.format(lbl)
h = f'3c120_meg_{lbl}'
ui.load_arf(n, make_data_path(h + '.arf'))
ui.load_rmf(n, make_data_path(h + '.rmf'))

4 changes: 2 additions & 2 deletions sherpa/astro/ui/tests/test_astro_ui.py
@@ -275,13 +275,13 @@ def test_load_data(loader, make_data_path, clean_astro_ui, caplog):
assert ui.list_data_ids() == ['foo']

msg1 = f"systematic errors were not found in file '{infile}'"
msg2 = f"statistical errors were found in file '{infile}' \n" + \
msg2 = f"statistical errors were found in file '{infile}'\n" + \
"but not used; to use them, re-read with use_errors=True"
msg3 = f"read ARF file {arf}"
msg4 = f"read RMF file {rmf}"

msg5 = f"systematic errors were not found in file '{bgfile}'"
msg6 = f"statistical errors were found in file '{bgfile}' \n" + \
msg6 = f"statistical errors were found in file '{bgfile}'\n" + \
"but not used; to use them, re-read with use_errors=True"
msg7 = f"read background file {bgfile}"
