From bfb5c38343e3f7f78eedf1c45163bb8d3c2d31e0 Mon Sep 17 00:00:00 2001 From: Douglas Burke Date: Thu, 5 Oct 2023 13:28:56 -0400 Subject: [PATCH 1/5] TEST: improve coverage for RMF/PHA input Check that we get a message when reading in the sis0.rmf file about the lack of offset data (no TLMIN on F_CHAN or CHANNEL columns). Check that we get a warning about a missing background file (BACKFILE keyword) for a PHA file (note that we have two places where we report issues for missing *FILE keywords and BACKFILE is handled differently to ANCR/RESPFILE which we already cover). --- sherpa/astro/io/tests/test_io_pha.py | 72 +++++++++++++++++++++++ sherpa/astro/io/tests/test_io_response.py | 10 +++- 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/sherpa/astro/io/tests/test_io_pha.py b/sherpa/astro/io/tests/test_io_pha.py index d719f6219a..4eb517b78e 100644 --- a/sherpa/astro/io/tests/test_io_pha.py +++ b/sherpa/astro/io/tests/test_io_pha.py @@ -858,6 +858,78 @@ def test_write_pha_fits_with_extras_roundtrip(tmp_path, caplog): raise RuntimeError(f"Unknown io backend: {io.backend}") +@requires_fits +def test_pha_missing_backfile(tmp_path, caplog): + """What happens if BACKFILE does not exist. + + This goes through a slightly-different path to the missing + ANCRFILE case tested in test_write_pha_fits_with_extras_roundtrip. + + """ + + chans = np.arange(1, 5, dtype=np.int32) + counts = np.asarray([1, 0, 3, 2], dtype=np.int32) + etime = 1023.4 + bscal = 0.05 + + hdr = {"TELESCOP": "CHANDRA", "INSTRUME": "ACIS", "FILTER": "NONE", + "CHANTYPE": "PI", + "DETCHANS": 5, + "OBJECT": "Made up source", + "CORRFILE": "None", + "BACKFILE": "made-up-backfile.fits", + # "structural keywords" which match DETCHANS + "TLMIN1": 1, "TLMAX1": 5} + + pha = DataPHA("testy", + chans.astype(np.float64), + counts.astype(np.float32), + exposure=etime, + backscal=bscal, + header=hdr) + + outfile = tmp_path / "out.pi" + io.write_pha(str(outfile), pha, ascii=False, clobber=False) + pha = None + + assert len(caplog.record_tuples) == 0 + + with SherpaVerbosity("INFO"): + inpha = io.read_pha(str(outfile)) + + assert len(caplog.record_tuples) == 1 + lname, lvl, msg = caplog.record_tuples[0] + assert lname == "sherpa.astro.io" + assert lvl == logging.WARNING + + # message depends on the backend + if backend_is("crates"): + assert msg.startswith("File ") + assert msg.endswith("/made-up-backfile.fits does not exist.") + elif backend_is("pyfits"): + assert msg.startswith("file '") + assert msg.endswith("/made-up-backfile.fits' not found") + + assert inpha.channel == pytest.approx(chans) + assert inpha.counts == pytest.approx(counts) + + assert inpha.response_ids == [] + assert inpha.background_ids == [] + + # Checks related to issue #1885 (so this is partly a regression + # test). + # + for key in ["ANCRFILE", "BACKFILE", "RESPFILE"]: + assert key not in inpha.header + + assert inpha.header["CORRFILE"] == "None" + + # DJB is interested to see when this gets set, so treat this + # as a regression test. 
+ # + assert inpha.header["SYS_ERR"] == 0 + + @requires_fits @requires_data def test_chandra_phaII_roundtrip(make_data_path, tmp_path): diff --git a/sherpa/astro/io/tests/test_io_response.py b/sherpa/astro/io/tests/test_io_response.py index 0fef201fe2..daa4aa795d 100644 --- a/sherpa/astro/io/tests/test_io_response.py +++ b/sherpa/astro/io/tests/test_io_response.py @@ -422,7 +422,7 @@ def test_write_rmf_fits_chandra_acis(make_data_path, tmp_path): @requires_data @requires_fits -def test_write_rmf_fits_asca_sis(make_data_path, tmp_path): +def test_write_rmf_fits_asca_sis(make_data_path, tmp_path, caplog): """Check we can write out an RMF as a FITS file. Use the ASCA SIS as it has a different structure to @@ -437,7 +437,15 @@ def test_write_rmf_fits_asca_sis(make_data_path, tmp_path): NUMELT = 505483 infile = make_data_path("sis0.rmf") + assert len(caplog.record_tuples) == 0 orig = io.read_rmf(infile) + assert len(caplog.record_tuples) == 1 + + lname, lvl, msg = caplog.record_tuples[0] + assert lname == io.backend.__name__ + assert lvl == logging.ERROR + assert msg.startswith("Failed to locate TLMIN keyword for F_CHAN column in RMF file '") + assert msg.endswith("sis0.rmf'; Update the offset value in the RMF data set to appropriate TLMIN value prior to fitting") # Unlike the ACIS case, this does not have NUMELT/GRP and # leave in the HDU keys to check they don't get over-written. From 2a0ce1d2f9216a5706a8d1331e0d723a30c5b129 Mon Sep 17 00:00:00 2001 From: Douglas Burke Date: Wed, 18 Oct 2023 08:40:17 -0400 Subject: [PATCH 2/5] TEST: note load_table_model is deprecated for XSPEC table models Ensure we check load_xstable_model and note that load_table_model is deprecated when loading XSPEC table models. --- sherpa/astro/io/tests/test_io.py | 38 ++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/sherpa/astro/io/tests/test_io.py b/sherpa/astro/io/tests/test_io.py index 80092ebc6a..cb62c7706e 100644 --- a/sherpa/astro/io/tests/test_io.py +++ b/sherpa/astro/io/tests/test_io.py @@ -19,6 +19,7 @@ # import struct +import warnings import numpy as np @@ -36,12 +37,45 @@ @requires_data @requires_fits @requires_xspec -def test_mod_fits(make_data_path): +def test_mod_fits(make_data_path, clean_astro_ui, caplog): + """Can we read in an XSPEC table model with load_table_model. + + This approach is deprecated. Use load_xstable_model instead. 
+    """
+
+    tablemodelfile = make_data_path("xspec-tablemodel-RCS.mod")
+    with warnings.catch_warnings(record=True) as warn:
+        ui.load_table_model("tmod", tablemodelfile)
+
+    assert len(warn) == 1
+    assert warn[0].category == DeprecationWarning
+    assert str(warn[0].message) == "Use load_xstable_model to load XSPEC table models"
+
+    tmod = ui.get_model_component("tmod")
+    assert tmod.name == "xstablemodel.tmod"
+
+    # just check the log output is empty
+    assert len(caplog.records) == 0
+
+
+@requires_data
+@requires_fits
+@requires_xspec
+def test_xsmod_fits(make_data_path, clean_astro_ui, caplog):
+    """Can we read in an XSPEC table model with load_xstable_model."""
+
     tablemodelfile = make_data_path("xspec-tablemodel-RCS.mod")
-    ui.load_table_model("tmod", tablemodelfile)
+    with warnings.catch_warnings(record=True) as warn:
+        ui.load_xstable_model("tmod", tablemodelfile)
+
+    assert len(warn) == 0
+
     tmod = ui.get_model_component("tmod")
     assert tmod.name == "xstablemodel.tmod"
 
+    # just check the log output is empty
+    assert len(caplog.records) == 0
+
 
 @requires_fits
 @pytest.mark.parametrize("flag", [True, False])

From 85b7ad20ddf48231b13c2a8c5b8dc784eb7b19cb Mon Sep 17 00:00:00 2001
From: Douglas Burke
Date: Mon, 16 Oct 2023 09:42:56 -0400
Subject: [PATCH 3/5] IO: minor internal cleanup

User-visible change: remove a space at the end of a multi-line
warning message. This has long bothered my sensibilities so remove
it. We have tests that need to be updated because of this change but
it shows the tests are testing the thing we want testing.

Replace some old exception catching which used the sys module's
exception handling to diagnose the message to write out when we can
just grab it from the exception itself. We have tests that catch this
so we can check that - at least in the tested cases - it works.

For PHA handling, move a global routine into _pack_pha as it is only
used there, and improve the comments.
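As a minimal, self-contained sketch of the exception-handling change
(illustrative only: risky() is a made-up stand-in for whichever
backend read call fails; the logger name is the one the tests check):

    import logging
    import sys

    warning = logging.getLogger("sherpa.astro.io").warning

    def risky():
        """Stand-in for a backend call that can fail."""
        raise OSError("unable to read the file")

    # Before: recover the active exception via the sys module.
    try:
        risky()
    except Exception:
        warning(str(sys.exc_info()[1]))

    # After: the caught exception object already carries the message.
    try:
        risky()
    except Exception as exc:
        warning(str(exc))

Both forms log the same text; the second drops the sys import and
makes the intent obvious.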
--- sherpa/astro/io/__init__.py | 61 +++++++++---------- .../ui/tests/test_astro_supports_pha2.py | 18 +++--- sherpa/astro/ui/tests/test_astro_ui.py | 4 +- 3 files changed, 38 insertions(+), 45 deletions(-) diff --git a/sherpa/astro/io/__init__.py b/sherpa/astro/io/__init__.py index 62be4f0965..ad85a88de0 100644 --- a/sherpa/astro/io/__init__.py +++ b/sherpa/astro/io/__init__.py @@ -54,7 +54,6 @@ import logging import os import re -import sys from typing import Any, Optional import numpy @@ -501,9 +500,9 @@ def _read_ancillary(data, key, label, dname, if output_once: info(f'read {label} file {data[key]}') - except Exception: + except Exception as exc: if output_once: - warning(str(sys.exc_info()[1])) + warning(str(exc)) return out @@ -544,16 +543,17 @@ def read_pha(arg, use_errors=False, use_background=False): elif data['syserror'] is None: msg = 'statistical' if output_once: - wmsg = "systematic errors were not found in " + \ - f"file '{filename}'" - warning(wmsg) + warning("systematic errors were not found in " + "file '%s'", filename) + else: msg = 'statistical and systematic' + if output_once: - imsg = msg + " errors were found in file " + \ - f"'{filename}' \nbut not used; " + \ - "to use them, re-read with use_errors=True" - info(imsg) + info("%s errors were found in file '%s'\n" + "but not used; to use them, re-read " + "with use_errors=True", msg, filename) + data['staterror'] = None data['syserror'] = None @@ -581,7 +581,6 @@ def read_pha(arg, use_errors=False, use_background=False): # Do not read backgrounds of backgrounds if not use_background: bkg_datasets = read_pha(data['backfile'], use_errors, True) - if output_once: info(f"read background file {data['backfile']}") @@ -597,9 +596,9 @@ def read_pha(arg, use_errors=False, use_background=False): bkg_datasets.set_response(arf, rmf) backgrounds.append(bkg_datasets) - except Exception: + except Exception as exc: if output_once: - warning(str(sys.exc_info()[1])) + warning(str(exc)) for bkg_type, bscal_type in zip(('background_up', 'background_down'), ('backscup', 'backscdn')): @@ -693,22 +692,6 @@ def _pack_image(dataset): return data, header -def _set_keyword(header, label, value): - """Extract the string form of the value, and store - the last path element in the header using the label - key. - """ - - if value is None: - return - - name = getattr(value, 'name', 'none') - if name is not None and name.find('/') != -1: - name = name.split('/')[-1] - - header[label] = name - - # This is to match NAXIS1 and the like, but we do not make it # very prescriptive (ie enforce the FITS keyword standards). # @@ -926,9 +909,23 @@ def _pack_pha(dataset): if dataset.exposure is not None: header["EXPOSURE"] = dataset.exposure - _set_keyword(header, "RESPFILE", rmf) - _set_keyword(header, "ANCRFILE", arf) - _set_keyword(header, "BACKFILE", bkg) + def _set_keyword(label: str, value: Optional[Data1D]) -> None: + """Do we set the *FILE keyword?""" + + if value is None or value.name is None: + return + + # Split on / and then take the last element. Should this + # instead make the path relative to the file name for the PHA + # dataset (but the problem is that this information is not + # known here)? + # + toks = value.name.split("/") + header[label] = toks[-1] + + _set_keyword("RESPFILE", rmf) + _set_keyword("ANCRFILE", arf) + _set_keyword("BACKFILE", bkg) # The column ordering for the ouput file is determined by the # order the keys are added to the data dict. 
diff --git a/sherpa/astro/ui/tests/test_astro_supports_pha2.py b/sherpa/astro/ui/tests/test_astro_supports_pha2.py
index eeec17d61c..feafacc632 100644
--- a/sherpa/astro/ui/tests/test_astro_supports_pha2.py
+++ b/sherpa/astro/ui/tests/test_astro_supports_pha2.py
@@ -1,5 +1,5 @@
 #
-#  Copyright (C) 2017, 2018, 2020, 2021
+#  Copyright (C) 2017, 2018, 2020, 2021, 2023
 #  Smithsonian Astrophysical Observatory
 #
 #
@@ -165,20 +165,16 @@ def test_load_pha2(loader, id0, ids, make_data_path, caplog, clean_astro_ui):
         assert b.name == infile
 
     # Test Log messages
-    msg_one = "systematic errors were not found in file '{}'".format(infile)
+    msg_one = f"systematic errors were not found in file '{infile}'"
 
-    # Editors can remove trailing spaces from lines, so split into
-    # separate lines so the space after the file name is included.
-    # Perhaps this space should be removed from the warning message?
-    #
-    msg_two = "statistical errors were found in file '{}' \n".format(infile) + \
+    msg_two = f"statistical errors were found in file '{infile}'\n" + \
              "but not used; to use them, re-read with use_errors=True"
 
-    msg_three = "read background_up into a dataset from file {}".format(infile)
-    msg_four = "read background_down into a dataset from file {}".format(infile)
+    msg_three = f"read background_up into a dataset from file {infile}"
+    msg_four = f"read background_down into a dataset from file {infile}"
 
     msg_five = "Multiple data sets have been input: " + \
-        "{}-{}".format(ids[0], ids[11])
+        f"{ids[0]}-{ids[11]}"
 
     assert caplog.record_tuples == [
         ('sherpa.astro.io', logging.WARNING, msg_one),
@@ -212,7 +208,7 @@ def test_load_pha2_compare_meg_order1(make_data_path, clean_astro_ui):
     ui.load_pha(pha2file)
 
     for n, lbl in zip([9, 10], ["-1", "1"]):
-        h = '3c120_meg_{}'.format(lbl)
+        h = f'3c120_meg_{lbl}'
         ui.load_arf(n, make_data_path(h + '.arf'))
         ui.load_rmf(n, make_data_path(h + '.rmf'))
 
diff --git a/sherpa/astro/ui/tests/test_astro_ui.py b/sherpa/astro/ui/tests/test_astro_ui.py
index 6417b0c3b3..d8b59c86a7 100644
--- a/sherpa/astro/ui/tests/test_astro_ui.py
+++ b/sherpa/astro/ui/tests/test_astro_ui.py
@@ -275,13 +275,13 @@ def test_load_data(loader, make_data_path, clean_astro_ui, caplog):
     assert ui.list_data_ids() == ['foo']
 
     msg1 = f"systematic errors were not found in file '{infile}'"
-    msg2 = f"statistical errors were found in file '{infile}' \n" + \
+    msg2 = f"statistical errors were found in file '{infile}'\n" + \
         "but not used; to use them, re-read with use_errors=True"
 
     msg3 = f"read ARF file {arf}"
     msg4 = f"read RMF file {rmf}"
 
     msg5 = f"systematic errors were not found in file '{bgfile}'"
-    msg6 = f"statistical errors were found in file '{bgfile}' \n" + \
+    msg6 = f"statistical errors were found in file '{bgfile}'\n" + \
         "but not used; to use them, re-read with use_errors=True"
     msg7 = f"read background file {bgfile}"

From d40262e671b3c24ee718396c7fe7705dd7ee5777 Mon Sep 17 00:00:00 2001
From: Douglas Burke
Date: Thu, 19 Oct 2023 10:04:45 -0400
Subject: [PATCH 4/5] UTILS: make users use load_xstable_model

The support for XSPEC table models in the UI load_table_model command
has been marked as deprecated since the 4.9 release but no-one knows
this (unless they read the documentation). So we are now explicit
with the deprecation warning (making it an actual logging call as
well as a deprecation warning).

The load_table_model call has been cleaned up (and it will make
removing the XSPEC support easier as we can now just delete the
lines).
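In outline, the XSPEC branch of load_table_model now behaves like the
following sketch (the helper name is made up for illustration; the
message and logger name are the ones the updated test checks):

    import logging
    import warnings

    warning = logging.getLogger("sherpa.astro.ui.utils").warning

    def warn_xspec_table_model_deprecation():
        """Hypothetical helper showing the new behaviour."""
        msg = "Use load_xstable_model to load XSPEC table models"
        # A DeprecationWarning on its own is hidden in interactive
        # sessions, so also report the message through the logger.
        warnings.warn(msg, DeprecationWarning)
        warning(msg)

so users get told about the deprecation even though Python hides
DeprecationWarning by default.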
There are also a number of pylint changes, mainly

- explicitly catch Exception rather than a bare "except:" block

  lint-wise this does not save much, but it does mean that we no
  longer catch the "exit exceptions" like Control-C, which should be
  better for responsiveness and makes the code clearer about what we
  are doing.

- use f-strings

  - except for logging statements where we do lazy logging

- remove one unused symbol
---
 sherpa/astro/io/tests/test_io.py |   9 ++-
 sherpa/astro/ui/utils.py         | 128 +++++++++++++++----------------
 sherpa/ui/utils.py               |  73 +++++++++---------
 3 files changed, 106 insertions(+), 104 deletions(-)

diff --git a/sherpa/astro/io/tests/test_io.py b/sherpa/astro/io/tests/test_io.py
index cb62c7706e..cd0f763357 100644
--- a/sherpa/astro/io/tests/test_io.py
+++ b/sherpa/astro/io/tests/test_io.py
@@ -47,15 +47,18 @@ def test_mod_fits(make_data_path, clean_astro_ui, caplog):
     with warnings.catch_warnings(record=True) as warn:
         ui.load_table_model("tmod", tablemodelfile)
 
+    msg = "Use load_xstable_model to load XSPEC table models"
     assert len(warn) == 1
     assert warn[0].category == DeprecationWarning
-    assert str(warn[0].message) == "Use load_xstable_model to load XSPEC table models"
+    assert str(warn[0].message) == msg
 
     tmod = ui.get_model_component("tmod")
     assert tmod.name == "xstablemodel.tmod"
 
-    # just check the log output is empty
-    assert len(caplog.records) == 0
+    assert len(caplog.records) == 1
+    assert caplog.records[0].name == "sherpa.astro.ui.utils"
+    assert caplog.records[0].levelname == "WARNING"
+    assert caplog.records[0].getMessage() == msg
 
 
 @requires_data
diff --git a/sherpa/astro/ui/utils.py b/sherpa/astro/ui/utils.py
index b59abe2be0..a46fb29870 100644
--- a/sherpa/astro/ui/utils.py
+++ b/sherpa/astro/ui/utils.py
@@ -34,7 +34,7 @@
 from sherpa.utils import sao_arange, send_to_pager
 from sherpa.utils.err import ArgumentErr, ArgumentTypeErr, DataErr, \
     IdentifierErr, ImportErr, IOErr, ModelErr
-from sherpa.utils.numeric_types import SherpaFloat, SherpaInt
+from sherpa.utils.numeric_types import SherpaFloat
 from sherpa.data import Data1D, Data1DAsymmetricErrs, Data2D, Data2DInt
 import sherpa.astro.all
 import sherpa.astro.plot
@@ -1712,13 +1712,13 @@ def unpack_data(self, filename, *args, **kwargs):
         """
         try:
             return self.unpack_pha(filename, *args, **kwargs)
-        except:
+        except Exception:
             try:
                 return self.unpack_image(filename, *args, **kwargs)
-            except:
+            except Exception:
                 try:
                     return self.unpack_table(filename, *args, **kwargs)
-                except:
+                except Exception:
                     # If this errors out then so be it
                     return self.unpack_ascii(filename, *args, **kwargs)
 
@@ -1874,10 +1874,10 @@ def _load_data(self, id, datasets):
             ids.append(idval)
 
         if num > 1:
-            info("Multiple data sets have been input: " +
-                 "{}-{}".format(ids[0], ids[-1]))
+            info("Multiple data sets have been input: %s-%s",
+                 ids[0], ids[-1])
         else:
-            info("One data set has been input: {}".format(ids[0]))
+            info("One data set has been input: %s", ids[0])
 
     # DOC-NOTE: also in sherpa.utils without the support for
     # multiple datasets.
@@ -7041,7 +7041,7 @@ def set_analysis(self, id, quantity=None, type='rate', factor=0):
             else:
                 fstring = f"{nfilter} {data.get_xlabel()}"
 
-            info(f"dataset {idval}: {fstring}")
+            info("dataset %s: %s", idval, fstring)
 
     def get_analysis(self, id=None):
         """Return the units used when fitting spectral data.
@@ -9558,12 +9558,12 @@ def fake_pha(self, id, arf, rmf, exposure, backscal=None, areascal=None, resp_ids = range(1, len(arf) + 1) self.load_multi_arfs(id, arf, resp_ids=resp_ids) elif arf is None: - # In some cases, arf is None, but rmf is not. - # For example, XMM/RGS does uses only a single file (the RMF) - # to hold all information. - pass + # In some cases, arf is None, but rmf is not. + # For example, XMM/RGS does uses only a single file (the RMF) + # to hold all information. + pass else: - self.set_arf(id, arf) + self.set_arf(id, arf) if numpy.iterable(rmf): resp_ids = range(1, len(rmf) + 1) @@ -9622,7 +9622,7 @@ def load_psf(self, modelname, filename_or_model, *args, **kwargs): if _is_str(filename_or_model): try: kernel = self._eval_model_expression(filename_or_model) - except: + except Exception: kernel = self.unpack_data(filename_or_model, *args, **kwargs) @@ -9736,12 +9736,11 @@ def set_full_model(self, id, model=None): if sherpa.ui.utils._is_subclass(type(part), instruments): do_warning = False if do_warning: - warning("PHA source model '%s' \ndoes not" % - model.name + - " have an associated instrument model; " + - "consider using \nset_source() instead of" + - " set_full_model() to include associated " + - "\ninstrument automatically") + warning("PHA source model '%s' \ndoes not" + " have an associated instrument model; " + "consider using \nset_source() instead of" + " set_full_model() to include associated " + "\ninstrument automatically", model.name) set_full_model.__doc__ = sherpa.ui.utils.Session.set_full_model.__doc__ @@ -10237,10 +10236,10 @@ def set_bkg_full_model(self, id, model=None, bkg_id=None): do_warning = False if do_warning: self.delete_bkg_model(id, bkg_id) - raise TypeError("PHA background source model '%s' \n" % model.name + - " does not have an associated instrument model;" + - " consider using\n set_bkg_source() instead of" + - " set_bkg_model() to include associated\n instrument" + + raise TypeError(f"PHA background source model '{model.name}' \n" + " does not have an associated instrument model;" + " consider using\n set_bkg_source() instead of" + " set_bkg_model() to include associated\n instrument" " automatically") self._runparamprompt(model.pars) @@ -10343,9 +10342,9 @@ def set_bkg_model(self, id, model=None, bkg_id=None): # Delete any previous model set with set_full_bkg_model() bkg_mdl = self._background_models.get(id, {}).pop(bkg_id, None) if bkg_mdl is not None: - warning("Clearing background convolved model\n'%s'\n" % - (bkg_mdl.name) + "for dataset %s background %s" % - (str(id), str(bkg_id))) + warning("Clearing background convolved model\n'%s'\n" + "for dataset %s background %s", + bkg_mdl.name, str(id), str(bkg_id)) set_bkg_source = set_bkg_model @@ -10410,7 +10409,7 @@ def _read_user_model(self, filename, *args, **kwargs): except TypeError: y = sherpa.astro.io.backend.get_ascii_data(filename, *args, **kwargs)[1].pop() - except: + except Exception: try: data = self.unpack_table(filename, *args, **kwargs) x = data.get_x() @@ -10422,7 +10421,7 @@ def _read_user_model(self, filename, *args, **kwargs): except TypeError: y = sherpa.astro.io.backend.get_table_data(filename, *args, **kwargs)[1].pop() - except: + except Exception: # unpack_data doesn't include a call to try # getting data from image, so try that here. data = self.unpack_image(filename, *args, **kwargs) @@ -10534,14 +10533,13 @@ def load_table_model(self, modelname, filename, .. 
note:: Deprecated in Sherpa 4.9 The new `load_xstable_model` routine should be used for loading XSPEC table model files. Support for these files - will be removed from `load_table_model` in the next + will be removed from `load_table_model` in the 4.17 release. A table model is defined on a grid of points which is - interpolated onto the independent axis of the data set. The - model will have at least one parameter (the amplitude, or - scaling factor to multiply the data by), but may have more - (if X-Spec table models are used). + interpolated onto the independent axis of the data set. The + model has a single parameter, ``ampl``, which is used to scale + the data, and it can be fixed or allowed to vary during a fit. Parameters ---------- @@ -10595,35 +10593,34 @@ def load_table_model(self, modelname, filename, >>> set_source('img', emap * gauss2d) """ - tablemodel = TableModel(modelname) - # interpolation method - tablemodel.method = method - tablemodel.filename = filename try: - if not sherpa.utils.is_binary_file(filename): - # TODO: use a Sherpa exception - raise Exception("Not a FITS file") - self.load_xstable_model(modelname, filename) - warnings.warn('Use load_xstable_model to load XSPEC table models', - DeprecationWarning) + + # Since users don't see DeprecationWarnings in ipython + # let's be explicit now, as most people are not aware of + # this change. + # + msg = 'Use load_xstable_model to load XSPEC table models' + warnings.warn(msg, DeprecationWarning) + warning(msg) return + except Exception: + pass + x = None + y = None + try: + x, y = self._read_user_model(filename, *args, **kwargs) except Exception: - x = None - y = None - try: - x, y = self._read_user_model(filename, *args, **kwargs) - except: - # Fall back to reading plain ASCII, if no other - # more sophisticated I/O backend loaded (such as - # pyfits or crates) SMD 05/29/13 - data = sherpa.io.read_data(filename, ncols=2) - x = data.x - y = data.y - tablemodel.load(x, y) + data = sherpa.io.read_data(filename, ncols=2) + x = data.x + y = data.y + tablemodel = TableModel(modelname) + tablemodel.method = method + tablemodel.filename = filename + tablemodel.load(x, y) self._tbl_models.append(tablemodel) self._add_model_component(tablemodel) @@ -10773,16 +10770,18 @@ def _prepare_fit(self, id, otherids=()): bkg_srcs = self._background_sources.get(s.idval, {}) if s.data.subtracted: if (bkg_models or bkg_srcs): - warning(f'data set {repr(s.idval)} is background-subtracted; ' + - 'background models will be ignored') + warning('data set %s is background-subtracted; ' + 'background models will be ignored', + repr(s.idval)) continue if not (bkg_models or bkg_srcs): if s.data.background_ids and self._current_stat.name != 'wstat': - warning(f'data set {repr(s.idval)} has associated backgrounds, ' + - 'but they have not been subtracted, ' + - 'nor have background models been set') + warning('data set %s has associated backgrounds, ' + 'but they have not been subtracted, ' + 'nor have background models been set', + repr(s.idval)) continue @@ -14702,12 +14701,11 @@ def is_numpy_ndarray(arg, name, npars, dim1=None): msg = name + ' must be 2d numpy.ndarray' raise IOErr(msg) if shape[0] != npars: - msg = name + ' must be of dimension (%d, x)' % npars + msg = name + f' must be of dimension ({npars}, x)' raise IOErr(msg) if dim1 is not None: if shape[1] != npars: - msg = name + ' must be of dimension (%d, %d)' % \ - (npars, npars) + msg = name + f' must be of dimension ({npars}, {npars})' raise IOErr(msg) _, fit = self._get_fit(id) @@ 
-15768,7 +15766,7 @@ def save_all(self, outfile=None, clobber=False): else: raise IOErr('filefound', outfile) - with open(outfile, 'w') as fh: + with open(outfile, 'w', encoding="UTF-8") as fh: serialize.save_all(self, fh) else: diff --git a/sherpa/ui/utils.py b/sherpa/ui/utils.py index bcea1908d1..d6a36291fa 100644 --- a/sherpa/ui/utils.py +++ b/sherpa/ui/utils.py @@ -437,7 +437,7 @@ def read_template_model(modelname, templatefile, tm.ampl.freeze() templates.append(tm) - assert(len(templates) == parvals.shape[0]) + assert len(templates) == parvals.shape[0] return create_template_model(modelname, parnames, parvals, templates, @@ -1208,7 +1208,7 @@ def _get_show_data(self, id=None): ids = [self._fix_id(id)] for id in ids: data_str += 'Data Set: %s\n' % id - data_str += self.get_data(id).__str__() + '\n\n' + data_str += str(self.get_data(id)) + '\n\n' return data_str def _get_show_filter(self, id=None): @@ -1230,7 +1230,7 @@ def _get_show_model(self, id=None): for id in ids: if id in mdl_ids: model_str += 'Model: %s\n' % id - model_str += self.get_model(id).__str__() + '\n\n' + model_str += str(self.get_model(id)) + '\n\n' return model_str def _get_show_source(self, id=None): @@ -1242,7 +1242,7 @@ def _get_show_source(self, id=None): for id in ids: if id in src_ids: model_str += 'Model: %s\n' % id - model_str += self.get_source(id).__str__() + '\n\n' + model_str += str(self.get_source(id)) + '\n\n' return model_str def _get_show_kernel(self, id=None): @@ -1254,7 +1254,7 @@ def _get_show_kernel(self, id=None): if id in self._psf.keys(): kernel_str += 'PSF Kernel: %s\n' % id # Show the PSF parameters - kernel_str += self.get_psf(id).__str__() + '\n\n' + kernel_str += str(self.get_psf(id)) + '\n\n' return kernel_str def _get_show_psf(self, id=None): @@ -1266,18 +1266,18 @@ def _get_show_psf(self, id=None): if id in self._psf.keys(): psf_str += 'PSF Model: %s\n' % id # Show the PSF dataset or PSF model - psf_str += self.get_psf(id).kernel.__str__() + '\n\n' + psf_str += str(self.get_psf(id).kernel) + '\n\n' return psf_str def _get_show_method(self): return ('Optimization Method: %s\n%s\n' % (type(self._current_method).__name__, - self._current_method.__str__())) + str(self._current_method))) def _get_show_stat(self): return ('Statistic: %s\n%s\n' % (type(self._current_stat).__name__, - self._current_stat.__str__())) + str(self._current_stat))) def _get_show_fit(self): if self._fit_results is None: @@ -2018,7 +2018,8 @@ def _get_plottype(self, plottype): # use the logger interface instead. It does mean that the # message will be repeated each time it is used. 
# - warning(f"The argument '{plottype}' is deprecated and '{answer}' should be used instead") + warning("The argument '%s' is deprecated and " + "'%s' should be used instead", plottype, answer) return answer def _check_plottype(self, plottype): @@ -5968,8 +5969,8 @@ class defines the functional form and the parameters of the name = modelclass.__name__.lower() if not _is_subclass(modelclass, sherpa.models.ArithmeticModel): - raise TypeError("model class '%s' is not a derived class" % name + - " from sherpa.models.ArithmeticModel") + raise TypeError(f"model class '{name}' is not a derived class " + "from sherpa.models.ArithmeticModel") self._model_types[name] = ModelWrapper(self, modelclass, args, kwargs) self._model_globals.update(self._model_types) @@ -6675,7 +6676,7 @@ def _runparamprompt(self, pars): ntokens = len(tokens) if ntokens > 3: - info("Error: Please provide a comma-separated " + + info("Error: Please provide a comma-separated " "list of floats; e.g. val,min,max") continue @@ -6683,21 +6684,21 @@ def _runparamprompt(self, pars): try: val = float(tokens[0]) except Exception as e: - info(f"Please provide a float value; {e}") + info("Please provide a float value; %s", e) continue if ntokens > 1 and tokens[1] != '': try: minval = float(tokens[1]) except Exception as e: - info(f"Please provide a float value; {e}") + info("Please provide a float value; %s", e) continue if ntokens > 2 and tokens[2] != '': try: maxval = float(tokens[2]) except Exception as e: - info(f"Please provide a float value; {e}") + info("Please provide a float value; %s", e) continue try: @@ -7367,9 +7368,8 @@ def load_table_model(self, modelname, filename, ncols=2, colkeys=None, A table model is defined on a grid of points which is interpolated onto the independent axis of the data set. The - model has a single parameter, ``ampl``, which is used to - scale the data, and it can be fixed or allowed to vary - during a fit. + model has a single parameter, ``ampl``, which is used to scale + the data, and it can be fixed or allowed to vary during a fit. 
Parameters ---------- @@ -7444,12 +7444,13 @@ def load_table_model(self, modelname, filename, ncols=2, colkeys=None, >>> set_par(filt.ampl, 1e3, min=1, max=1e6) """ + + x, y = self._read_user_model(filename, ncols, colkeys, + dstype, sep, comment) + tablemodel = TableModel(modelname) - # interpolation method tablemodel.method = method tablemodel.filename = filename - x, y = self._read_user_model(filename, ncols, colkeys, - dstype, sep, comment) tablemodel.load(x, y) self._tbl_models.append(tablemodel) self._add_model_component(tablemodel) @@ -7554,8 +7555,8 @@ def func2d(pars, x0, x1, x0hi=None, x1hi=None): usermodel = sherpa.models.UserModel(modelname) usermodel.calc = func usermodel._file = filename - if (filename is not None): - x, usermodel._y = self._read_user_model(filename, ncols, colkeys, + if filename is not None: + _, usermodel._y = self._read_user_model(filename, ncols, colkeys, dstype, sep, comment) self._add_model_component(usermodel) @@ -7629,7 +7630,7 @@ def add_user_pars(self, modelname, parnames, usermodel = self._get_model_component(modelname) if (usermodel is None or - type(usermodel) is not sherpa.models.UserModel): + not isinstance(usermodel, sherpa.models.UserModel)): raise ArgumentTypeErr('badarg', modelname, "a user model") pars = [] @@ -7823,7 +7824,7 @@ def load_conv(self, modelname, filename_or_model, *args, **kwargs): if _is_str(filename_or_model): try: kernel = self._eval_model_expression(filename_or_model) - except: + except Exception: kernel = self.unpack_data(filename_or_model, *args, **kwargs) @@ -7887,7 +7888,7 @@ def load_psf(self, modelname, filename_or_model, *args, **kwargs): if _is_str(filename_or_model): try: kernel = self._eval_model_expression(filename_or_model) - except: + except Exception: kernel = self.unpack_data(filename_or_model, *args, **kwargs) @@ -8314,9 +8315,9 @@ def freeze(self, *args): try: par.freeze() - except AttributeError: + except AttributeError as exc: raise ArgumentTypeErr('badarg', 'par', - 'a parameter or model object or expression string') + 'a parameter or model object or expression string') from exc def thaw(self, *args): """Allow model parameters to be varied during a fit. @@ -8377,9 +8378,9 @@ def thaw(self, *args): try: par.thaw() - except AttributeError: + except AttributeError as exc: raise ArgumentTypeErr('badarg', 'par', - 'a parameter or model object or expression string') + 'a parameter or model object or expression string') from exc def link(self, par, val): """Link a parameter to a value. @@ -11895,9 +11896,9 @@ def get_source_plot(self, id=None, recalc=True): id = self._fix_id(id) mdl = self._models.get(id, None) if mdl is not None: - raise IdentifierErr("Convolved model\n'{}'".format(mdl.name) + - "\n is set for dataset {}.".format(id) + - " You should use get_model_plot instead.") + raise IdentifierErr(f"Convolved model\n'{mdl.name}'\n" + f" is set for dataset {id}. " + "You should use get_model_plot instead.") if recalc: data = self.get_data(id) @@ -13968,9 +13969,9 @@ def plot_source(self, id=None, replot=False, id = self._fix_id(id) mdl = self._models.get(id, None) if mdl is not None: - raise IdentifierErr("Convolved model\n'{}'".format(mdl.name) + - "\n is set for dataset {}.".format(id) + - " You should use plot_model instead.") + raise IdentifierErr(f"Convolved model\n'{mdl.name}'\n" + f" is set for dataset {id}. 
" + "You should use plot_model instead.") plotobj = self.get_source_plot(id, recalc=not replot) self._plot(plotobj, overplot=overplot, clearwindow=clearwindow, From dfc4d4ad7a913aece13210163c06c8af910b0b9c Mon Sep 17 00:00:00 2001 From: Doug Burke Date: Wed, 3 Jan 2024 13:12:09 -0500 Subject: [PATCH 5/5] Update sherpa/astro/ui/utils.py Co-authored-by: wmclaugh --- sherpa/astro/ui/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sherpa/astro/ui/utils.py b/sherpa/astro/ui/utils.py index a46fb29870..7304aefbef 100644 --- a/sherpa/astro/ui/utils.py +++ b/sherpa/astro/ui/utils.py @@ -9559,7 +9559,7 @@ def fake_pha(self, id, arf, rmf, exposure, backscal=None, areascal=None, self.load_multi_arfs(id, arf, resp_ids=resp_ids) elif arf is None: # In some cases, arf is None, but rmf is not. - # For example, XMM/RGS does uses only a single file (the RMF) + # For example, XMM/RGS uses only a single file (the RMF) # to hold all information. pass else: