diff --git a/README.rst b/README.rst index a03670646d..66cf1c4249 100644 --- a/README.rst +++ b/README.rst @@ -46,7 +46,7 @@ PypeIt |forks| |stars| |github| |pypi| |pypi_downloads| |License| -|docs| |CITests| |Coverage| +|docs| |CITests| |DOI_latest| |JOSS| |arxiv| diff --git a/doc/figures/Edges_A_0_MSC01_orders_qa.png b/doc/figures/Edges_A_0_MSC01_orders_qa.png new file mode 100644 index 0000000000..72c6c30127 Binary files /dev/null and b/doc/figures/Edges_A_0_MSC01_orders_qa.png differ diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index 77d8e84bbc..77ed59f2fb 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -4,7 +4,7 @@ usage: run_pypeit [-h] [-v VERBOSITY] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file - ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.16.1.dev468+g832ee84e4 + ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.16.1.dev95+g35e103c10 ## ## Available spectrographs include: ## aat_uhrf, bok_bc, gemini_flamingos1, gemini_flamingos2, diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index 4ba6149515..31464764da 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -350,77 +350,80 @@ EdgeTracePar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.EdgeTracePar` -=========================== ================ =========================================== ============== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default 
Description -=========================== ================ =========================================== ============== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -``add_missed_orders`` bool .. False For any Echelle spectrograph (fixed-format or otherwise), attempt to add orders that have been missed by the automated edge tracing algorithm. For *fixed-format* Echelles, this is based on the expected positions on on the detector. Otherwise, the detected orders are modeled and roughly used to predict the locations of missed orders; see additional parameters ``order_width_poly``, ``order_gap_poly``, and ``order_spat_range``. -``add_predict`` str .. ``nearest`` Sets the method used to predict the shape of the left and right traces for a user-defined slit inserted. Options are (1) ``straight`` inserts traces with a constant spatial pixels position, (2) ``nearest`` inserts traces with a form identical to the automatically identified trace at the nearest spatial position to the inserted slit, or (3) ``pca`` uses the PCA decomposition to predict the shape of the traces. -``add_slits`` str, list .. .. Add one or more user-defined slits. The syntax to define a slit to add is: 'det:spec:spat_left:spat_right' where det=detector, spec=spectral pixel, spat_left=spatial pixel of left slit boundary, and spat_righ=spatial pixel of right slit boundary. 
For example, '2:2000:2121:2322,3:2000:1201:1500' will add a slit to detector 2 passing through spec=2000 extending spatially from 2121 to 2322 and another on detector 3 at spec=2000 extending from 1201 to 1500. -``auto_pca`` bool .. True During automated tracing, attempt to construct a PCA decomposition of the traces. When True, the edge traces resulting from the initial detection, centroid refinement, and polynomial fitting must meet a set of criteria for performing the pca; see :func:`pypeit.edgetrace.EdgeTraceSet.can_pca`. If False, the ``sync_predict`` parameter *cannot* be set to ``pca``; if it is not, the value is set to ``nearest`` and a warning is issued when validating the parameter set. -``bound_detector`` bool .. False When the code is ready to synchronize the left/right trace edges, the traces should have been constructed, vetted, and cleaned. This can sometimes lead to *no* valid traces. This parameter dictates what to do next. If ``bound_detector`` is True, the code will artificially add left and right edges that bound the detector; if False, the code identifies the slit-edge tracing as being unsuccessful, warns the user, and ends gracefully. Note that setting ``bound_detector`` to True is needed for some long-slit data where the slit edges are, in fact, beyond the edges of the detector. -``clip`` bool .. True Remove traces flagged as bad, instead of only masking them. This is currently only used by :func:`~pypeit.edgetrace.EdgeTraceSet.centroid_refine`. -``det_buffer`` int .. 5 The minimum separation between the detector edges and a slit edge for any added edge traces. Must be positive. -``det_min_spec_length`` int, float .. 0.33 The minimum spectral length (as a fraction of the detector size) of a trace determined by direct measurements of the detector data (as opposed to what should be included in any modeling approach; see fit_min_spec_length). -``dlength_range`` int, float .. .. 
Similar to ``minimum_slit_dlength``, but constrains the *fractional* change in the slit length as a function of wavelength. For example, a value of 0.2 means that slit length should not vary more than 20%as a function of wavelength. -``edge_detect_clip`` int, float .. .. Sigma clipping level for peaks detected in the collapsed, Sobel-filtered significance image. -``edge_thresh`` int, float .. 20.0 Threshold for finding edges in the Sobel-filtered significance image. -``exclude_regions`` list, str .. .. User-defined regions to exclude from the slit tracing. To set this parameter, the text should be a comma separated list of pixel ranges (in the x direction) to be excluded and the detector number. For example, the following string 1:0:20,1:300:400 would select two regions in det=1 between pixels 0 and 20 and between 300 and 400. -``filt_iter`` int .. 0 Number of median-filtering iterations to perform on sqrt(trace) image before applying to Sobel filter to detect slit/order edges. -``fit_function`` str ``polynomial``, ``legendre``, ``chebyshev`` ``legendre`` Function fit to edge measurements. Options are: polynomial, legendre, chebyshev -``fit_maxdev`` int, float .. 5.0 Maximum deviation between the fitted and measured edge position for rejection in spatial pixels. -``fit_maxiter`` int .. 25 Maximum number of rejection iterations during edge fitting. -``fit_min_spec_length`` float .. 0.6 Minimum unmasked spectral length of a traced slit edge to use in any modeling procedure (polynomial fitting or PCA decomposition). -``fit_niter`` int .. 1 Number of iterations of re-measuring and re-fitting the edge data; see :func:`~pypeit.core.trace.fit_trace`. -``fit_order`` int .. 5 Order of the function fit to edge measurements. -``follow_span`` int .. 20 In the initial connection of spectrally adjacent edge detections, this sets the number of previous spectral rows to consider when following slits forward. -``fwhm_gaussian`` int, float .. 
3.0 The `fwhm` parameter to use when using Gaussian weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description :func:`~pypeit.core.trace.peak_trace`. -``fwhm_uniform`` int, float .. 3.0 The `fwhm` parameter to use when using uniform weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description of :func:`~pypeit.core.trace.peak_trace`. -``gap_offset`` int, float .. 5.0 Offset (pixels) used for the slit edge gap width when inserting slit edges (see `sync_center`) or when nudging predicted slit edges to avoid slit overlaps. This should be larger than `minimum_slit_gap` when converted to arcseconds. -``left_right_pca`` bool .. False Construct a PCA decomposition for the left and right traces separately. This can be important for cross-dispersed echelle spectrographs (e.g., Keck-NIRES) -``length_range`` int, float .. .. Allowed range in slit length compared to the median slit length. For example, a value of 0.3 means that slit lengths should not vary more than 30%. Relatively shorter or longer slits are masked or clipped. Most useful for echelle or multi-slit data where the slits should have similar or identical lengths. -``maskdesign_filename`` str, list .. .. Mask design info contained in this file or files (comma separated) -``maskdesign_maxsep`` int, float .. 50 Maximum allowed offset in pixels between the slit edges defined by the slit-mask design and the traced edges. -``maskdesign_sigrej`` int, float .. 3 Number of sigma for sigma-clipping rejection during slit-mask design matching. -``maskdesign_step`` int, float .. 1 Step in pixels used to generate a list of possible offsets (within +/- `maskdesign_maxsep`) between the slit edges defined by the mask design and the traced edges. -``match_tol`` int, float .. 3.0 Same-side slit edges below this separation in pixels are considered part of the same edge. -``max_nudge`` int, float .. .. 
If parts of any (predicted) trace fall off the detector edge, allow them to be nudged away from the detector edge up to and including this maximum number of pixels. If None, no limit is set; otherwise should be 0 or larger. -``max_shift_abs`` int, float .. 0.5 Maximum spatial shift in pixels between an input edge location and the recentroided value. -``max_shift_adj`` int, float .. 0.15 Maximum spatial shift in pixels between the edges in adjacent spectral positions. -``max_spat_error`` int, float .. .. Maximum error in the spatial position of edges in pixels. -``minimum_slit_dlength`` int, float .. .. Minimum *change* in the slit length (arcsec) as a function of wavelength in arcsec. This is mostly meant to catch cases when the polynomial fit to the detected edges becomes ill-conditioned (e.g., when the slits run off the edge of the detector) and leads to wild traces. If reducing the order of the polynomial (``fit_order``) does not help, try using this to remove poorly constrained slits. -``minimum_slit_gap`` int, float .. .. Minimum slit gap in arcsec. Gaps between slits are determined by the median difference between the right and left edge locations of adjacent slits. Slits with small gaps are merged by removing the intervening traces.If None, no minimum slit gap is applied. This should be smaller than `gap_offset` when converted to pixels. -``minimum_slit_length`` int, float .. .. Minimum slit length in arcsec. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. This is used to identify traces that are *erroneously* matched together to form slits. Short slits are expected to be ignored or removed (see ``clip``). If None, no minimum slit length applied. -``minimum_slit_length_sci`` int, float .. .. Minimum slit length in arcsec for a science slit. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. 
Used in combination with ``minimum_slit_length``, this parameter is used to identify box or alignment slits; i.e., those slits that are shorter than ``minimum_slit_length_sci`` but larger than ``minimum_slit_length`` are box/alignment slits. Box slits are *never* removed (see ``clip``), but no spectra are extracted from them. If None, no minimum science slit length is applied. -``niter_gaussian`` int .. 6 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using Gaussian weighting. -``niter_uniform`` int .. 9 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using uniform weighting. -``order_gap_poly`` int .. 3 Order of the Legendre polynomial used to model the spatial gap between orders as a function of the order spatial position. See ``add_missed_orders``. -``order_match`` int, float .. .. Orders for *fixed-format* echelle spectrographs are always matched to a predefined expectation for the number of orders found and their relative placement in the detector. This sets the tolerance allowed for matching identified "slits" to echelle orders. Must be relative to the fraction of the detector spatial scale (i.e., a value of 0.05 means that the order locations must be within 5% of the expected value). If None, no limit is used. -``order_offset`` int, float .. .. Orders for *fixed-format* echelle spectrographs are always matched to a predefined expectation for the number of orders found and their relative placement in the detector. This sets the offset to introduce to the expected order positions to improve the match for this specific data. This is an additive offset to the measured slit positions; i.e., this should minimize the difference between the expected order positions and ``self.slit_spatial_center() + offset``. Must be in the fraction of the detector spatial scale. If None, no offset is applied. -``order_spat_range`` list .. .. The spatial range of the detector/mosaic over which to predict order locations. 
If None, the full detector/mosaic range is used. See ``add_missed_orders``. -``order_width_poly`` int .. 2 Order of the Legendre polynomial used to model the spatial width of each order as a function of spatial pixel position. See ``add_missed_orders``. -``overlap`` bool .. False Assume slits identified as abnormally short are actually due to overlaps between adjacent slits/orders. If set to True, you *must* have also used ``length_range`` to identify left-right edge pairs that have an abnormally short separation. For those short slits, the code attempts to convert the short slits into slit gaps. This is particularly useful for blue orders in Keck-HIRES data. -``pad`` int .. 0 Integer number of pixels to consider beyond the slit edges when selecting pixels that are 'on' the slit. -``pca_function`` str ``polynomial``, ``legendre``, ``chebyshev`` ``polynomial`` Type of function fit to the PCA coefficients for each component. Options are: polynomial, legendre, chebyshev -``pca_maxiter`` int .. 25 Maximum number of rejection iterations when fitting the PCA coefficients. -``pca_maxrej`` int .. 1 Maximum number of PCA coefficients rejected during a given fit iteration. -``pca_min_edges`` int .. 4 Minimum number of edge traces required to perform a PCA decomposition of the trace form. If left_right_pca is True, this minimum applies to the number of left and right traces separately. -``pca_n`` int .. .. The number of PCA components to keep, which must be less than the number of detected traces. If not provided, determined by calculating the minimum number of components required to explain a given percentage of variance in the edge data; see `pca_var_percent`. -``pca_order`` int .. 2 Order of the function fit to the PCA coefficients. -``pca_sigrej`` int, float, list .. 2.0, 2.0 Sigma rejection threshold for fitting PCA components. Individual numbers are used for both lower and upper rejection. A list of two numbers sets these explicitly (e.g., [2., 3.]). 
-``pca_var_percent`` int, float .. 99.8 The percentage (i.e., not the fraction) of the variance in the edge data accounted for by the PCA used to truncate the number of PCA coefficients to keep (see `pca_n`). Ignored if `pca_n` is provided directly. -``rm_slits`` str, list .. .. Remove one or more user-specified slits. The syntax used to define a slit to remove is: 'det:spec:spat' where det=detector, spec=spectral pixel, spat=spatial pixel. For example, '2:2000:2121,3:2000:1500' will remove the slit on detector 2 that contains pixel (spat,spec)=(2000,2121) and on detector 3 that contains pixel (2000,2121). -``smash_range`` list .. 0.0, 1.0 Range of the slit in the spectral direction (in fractional units) to smash when searching for slit edges. If the spectrum covers only a portion of the image, use that range. -``sobel_enhance`` int .. 0 Enhance the sobel filtering? A value of 0 will not enhance the sobel filtering. Any other value > 0 will sum the sobel values. For example, a value of 3 will combine the sobel values for the 3 nearest pixels. This is useful when a slit edge is poorly defined (e.g. vignetted). -``sobel_mode`` str ``nearest``, ``constant`` ``nearest`` Mode for Sobel filtering. Default is 'nearest'; note we find'constant' works best for DEIMOS. -``sync_center`` str ``median``, ``nearest``, ``gap`` ``median`` Mode to use for determining the location of traces to insert. Use `median` to use the median of the matched left and right edge pairs, `nearest` to use the length of the nearest slit, or `gap` to offset by a fixed gap width from the next slit edge. -``sync_predict`` str ``pca``, ``nearest``, ``auto`` ``pca`` Mode to use when predicting the form of the trace to insert. Use `pca` to use the PCA decomposition, `nearest` to reproduce the shape of the nearest trace, or `auto` to let PypeIt decide which mode to use between `pca` and `nearest`. In general, it will first try `pca`, and if that is not possible, it will use `nearest`. 
-``sync_to_edge`` bool .. True If adding a first left edge or a last right edge, ignore `center_mode` for these edges and place them at the edge of the detector (with the relevant shape). -``trace_median_frac`` int, float .. .. After detection of peaks in the rectified Sobel-filtered image and before refitting the edge traces, the rectified image is median filtered with a kernel width of `trace_median_frac*nspec` along the spectral dimension. -``trace_rms_tol`` int, float .. .. After retracing edges using peaks detected in the rectified and collapsed image, the RMS difference (in pixels) between the original and refit traces are calculated. This sets the upper limit of the RMS for traces that will be removed. If None, no limit is set and all new traces are kept. -``trace_thresh`` int, float .. .. After rectification and median filtering of the Sobel-filtered image (see `trace_median_frac`), values in the median-filtered image *below* this threshold are masked in the refitting of the edge trace data. If None, no masking applied. -``trim_spec`` list .. .. User-defined truncation of all slits in the spectral direction.Should be two integers, e.g. 100,150 trims 100 pixels from the short wavelength end and 150 pixels from the long wavelength end of the spectral axis of the detector. -``use_maskdesign`` bool .. False Use slit-mask designs to identify slits. 
-=========================== ================ =========================================== ============== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== +=========================== ================ =========================================== ============== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +=========================== ================ =========================================== ============== 
=================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== +``add_missed_orders`` bool .. False For any Echelle spectrograph (fixed-format or otherwise), attempt to add orders that have been missed by the automated edge tracing algorithm. For *fixed-format* echelles, this is based on the expected positions on on the detector. Otherwise, the detected orders are modeled and used to predict the locations of missed orders; see additional parameters ``order_width_poly``, ``order_gap_poly``, ``order_fitrej``, ``order_outlier``, and ``order_spat_range``. +``add_predict`` str .. ``nearest`` Sets the method used to predict the shape of the left and right traces for a user-defined slit inserted. Options are (1) ``straight`` inserts traces with a constant spatial pixels position, (2) ``nearest`` inserts traces with a form identical to the automatically identified trace at the nearest spatial position to the inserted slit, or (3) ``pca`` uses the PCA decomposition to predict the shape of the traces. +``add_slits`` str, list .. .. Add one or more user-defined slits. The syntax to define a slit to add is: 'det:spec:spat_left:spat_right' where det=detector, spec=spectral pixel, spat_left=spatial pixel of left slit boundary, and spat_righ=spatial pixel of right slit boundary. 
For example, '2:2000:2121:2322,3:2000:1201:1500' will add a slit to detector 2 passing through spec=2000 extending spatially from 2121 to 2322 and another on detector 3 at spec=2000 extending from 1201 to 1500. +``auto_pca`` bool .. True During automated tracing, attempt to construct a PCA decomposition of the traces. When True, the edge traces resulting from the initial detection, centroid refinement, and polynomial fitting must meet a set of criteria for performing the pca; see :func:`pypeit.edgetrace.EdgeTraceSet.can_pca`. If False, the ``sync_predict`` parameter *cannot* be set to ``pca``; if it is not, the value is set to ``nearest`` and a warning is issued when validating the parameter set. +``bound_detector`` bool .. False When the code is ready to synchronize the left/right trace edges, the traces should have been constructed, vetted, and cleaned. This can sometimes lead to *no* valid traces. This parameter dictates what to do next. If ``bound_detector`` is True, the code will artificially add left and right edges that bound the detector; if False, the code identifies the slit-edge tracing as being unsuccessful, warns the user, and ends gracefully. Note that setting ``bound_detector`` to True is needed for some long-slit data where the slit edges are, in fact, beyond the edges of the detector. +``clip`` bool .. True Remove traces flagged as bad, instead of only masking them. This is currently only used by :func:`~pypeit.edgetrace.EdgeTraceSet.centroid_refine`. +``det_buffer`` int .. 5 The minimum separation between the detector edges and a slit edge for any added edge traces. Must be positive. +``det_min_spec_length`` int, float .. 0.33 The minimum spectral length (as a fraction of the detector size) of a trace determined by direct measurements of the detector data (as opposed to what should be included in any modeling approach; see fit_min_spec_length). +``dlength_range`` int, float .. .. 
Similar to ``minimum_slit_dlength``, but constrains the *fractional* change in the slit length as a function of wavelength. For example, a value of 0.2 means that slit length should not vary more than 20% as a function of wavelength. +``edge_detect_clip`` int, float .. .. Sigma clipping level for peaks detected in the collapsed, Sobel-filtered significance image. +``edge_thresh`` int, float .. 20.0 Threshold for finding edges in the Sobel-filtered significance image. +``exclude_regions`` list, str .. .. User-defined regions to exclude from the slit tracing. To set this parameter, the text should be a comma separated list of pixel ranges (in the x direction) to be excluded and the detector number. For example, the following string 1:0:20,1:300:400 would select two regions in det=1 between pixels 0 and 20 and between 300 and 400. +``filt_iter`` int .. 0 Number of median-filtering iterations to perform on sqrt(trace) image before applying to Sobel filter to detect slit/order edges. +``fit_function`` str ``polynomial``, ``legendre``, ``chebyshev`` ``legendre`` Function fit to edge measurements. Options are: polynomial, legendre, chebyshev +``fit_maxdev`` int, float .. 5.0 Maximum deviation between the fitted and measured edge position for rejection in spatial pixels. +``fit_maxiter`` int .. 25 Maximum number of rejection iterations during edge fitting. +``fit_min_spec_length`` float .. 0.6 Minimum unmasked spectral length of a traced slit edge to use in any modeling procedure (polynomial fitting or PCA decomposition). +``fit_niter`` int .. 1 Number of iterations of re-measuring and re-fitting the edge data; see :func:`~pypeit.core.trace.fit_trace`. +``fit_order`` int .. 5 Order of the function fit to edge measurements. +``follow_span`` int .. 20 In the initial connection of spectrally adjacent edge detections, this sets the number of previous spectral rows to consider when following slits forward. +``fwhm_gaussian`` int, float .. 
3.0 The `fwhm` parameter to use when using Gaussian weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description of :func:`~pypeit.core.trace.peak_trace`. +``fwhm_uniform`` int, float .. 3.0 The `fwhm` parameter to use when using uniform weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description of :func:`~pypeit.core.trace.peak_trace`. +``gap_offset`` int, float .. 5.0 Offset (pixels) used for the slit edge gap width when inserting slit edges (see `sync_center`) or when nudging predicted slit edges to avoid slit overlaps. This should be larger than `minimum_slit_gap` when converted to arcseconds. +``left_right_pca`` bool .. False Construct a PCA decomposition for the left and right traces separately. This can be important for cross-dispersed echelle spectrographs (e.g., Keck-NIRES) +``length_range`` int, float .. .. Allowed range in slit length compared to the median slit length. For example, a value of 0.3 means that slit lengths should not vary more than 30%. Relatively shorter or longer slits are masked or clipped. Most useful for echelle or multi-slit data where the slits should have similar or identical lengths. +``maskdesign_filename`` str, list .. .. Mask design info contained in this file or files (comma separated) +``maskdesign_maxsep`` int, float .. 50 Maximum allowed offset in pixels between the slit edges defined by the slit-mask design and the traced edges. +``maskdesign_sigrej`` int, float .. 3 Number of sigma for sigma-clipping rejection during slit-mask design matching. +``maskdesign_step`` int, float .. 1 Step in pixels used to generate a list of possible offsets (within +/- `maskdesign_maxsep`) between the slit edges defined by the mask design and the traced edges. +``match_tol`` int, float .. 3.0 Same-side slit edges below this separation in pixels are considered part of the same edge. +``max_nudge`` int, float .. .. 
If parts of any (predicted) trace fall off the detector edge, allow them to be nudged away from the detector edge up to and including this maximum number of pixels. If None, no limit is set; otherwise should be 0 or larger. +``max_overlap`` float .. .. When adding missing echelle orders based on where existing orders are found, the prediction can yield overlapping orders. The edges of these orders are adjusted to eliminate the overlap, and orders can be added up over the spatial range of the detector set by ``order_spat_range``. If this value is None, orders are added regardless of how much they overlap. If not None, this defines the maximum fraction of an order spatial width that can overlap with other orders. For example, if ``max_overlap=0.5``, any order that overlaps its neighboring orders by more than 50% will not be added as a missing order. +``max_shift_abs`` int, float .. 0.5 Maximum spatial shift in pixels between an input edge location and the recentroided value. +``max_shift_adj`` int, float .. 0.15 Maximum spatial shift in pixels between the edges in adjacent spectral positions. +``max_spat_error`` int, float .. .. Maximum error in the spatial position of edges in pixels. +``minimum_slit_dlength`` int, float .. .. Minimum *change* in the slit length (arcsec) as a function of wavelength in arcsec. This is mostly meant to catch cases when the polynomial fit to the detected edges becomes ill-conditioned (e.g., when the slits run off the edge of the detector) and leads to wild traces. If reducing the order of the polynomial (``fit_order``) does not help, try using this to remove poorly constrained slits. +``minimum_slit_gap`` int, float .. .. Minimum slit gap in arcsec. Gaps between slits are determined by the median difference between the right and left edge locations of adjacent slits. Slits with small gaps are merged by removing the intervening traces. If None, no minimum slit gap is applied. 
This should be smaller than `gap_offset` when converted to pixels. +``minimum_slit_length`` int, float .. .. Minimum slit length in arcsec. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. This is used to identify traces that are *erroneously* matched together to form slits. Short slits are expected to be ignored or removed (see ``clip``). If None, no minimum slit length applied. +``minimum_slit_length_sci`` int, float .. .. Minimum slit length in arcsec for a science slit. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. Used in combination with ``minimum_slit_length``, this parameter is used to identify box or alignment slits; i.e., those slits that are shorter than ``minimum_slit_length_sci`` but larger than ``minimum_slit_length`` are box/alignment slits. Box slits are *never* removed (see ``clip``), but no spectra are extracted from them. If None, no minimum science slit length is applied. +``niter_gaussian`` int .. 6 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using Gaussian weighting. +``niter_uniform`` int .. 9 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using uniform weighting. +``order_fitrej`` int, float .. 3.0 When fitting the width of and gap between echelle orders with Legendre polynomials, this is the sigma-clipping threshold when excluding data from the fit. See ``add_missed_orders``. +``order_gap_poly`` int .. 3 Order of the Legendre polynomial used to model the spatial gap between orders as a function of the order spatial position. See ``add_missed_orders``. +``order_match`` int, float .. .. Orders for *fixed-format* echelle spectrographs are always matched to a predefined expectation for the number of orders found and their relative placement in the detector. 
This sets the tolerance allowed for matching identified "slits" to echelle orders. Must be relative to the fraction of the detector spatial scale (i.e., a value of 0.05 means that the order locations must be within 5% of the expected value). If None, no limit is used. +``order_offset`` int, float .. .. Orders for *fixed-format* echelle spectrographs are always matched to a predefined expectation for the number of orders found and their relative placement in the detector. This sets the offset to introduce to the expected order positions to improve the match for this specific data. This is an additive offset to the measured slit positions; i.e., this should minimize the difference between the expected order positions and ``self.slit_spatial_center() + offset``. Must be in the fraction of the detector spatial scale. If None, no offset is applied. +``order_outlier`` int, float .. .. When fitting the width of echelle orders with Legendre polynomials, this is the sigma-clipping threshold used to identify outliers. Orders clipped by this threshold are *removed* from further consideration, whereas orders clipped by ``order_fitrej`` are excluded from the polynomial fit but are not removed. Note this is *only applied to the order widths*, not the order gaps. If None, no "outliers" are identified/removed. Should be larger or equal to ``order_fitrej``. +``order_spat_range`` list .. .. The spatial range of the detector/mosaic over which to predict order locations. If None, the full detector/mosaic range is used. See ``add_missed_orders``. +``order_width_poly`` int .. 2 Order of the Legendre polynomial used to model the spatial width of each order as a function of spatial pixel position. See ``add_missed_orders``. +``overlap`` bool .. False Assume slits identified as abnormally short are actually due to overlaps between adjacent slits/orders. If set to True, you *must* have also used ``length_range`` to identify left-right edge pairs that have an abnormally short separation. 
For those short slits, the code attempts to convert the short slits into slit gaps. This is particularly useful for blue orders in Keck-HIRES data. +``pad`` int .. 0 Integer number of pixels to consider beyond the slit edges when selecting pixels that are 'on' the slit. +``pca_function`` str ``polynomial``, ``legendre``, ``chebyshev`` ``polynomial`` Type of function fit to the PCA coefficients for each component. Options are: polynomial, legendre, chebyshev +``pca_maxiter`` int .. 25 Maximum number of rejection iterations when fitting the PCA coefficients. +``pca_maxrej`` int .. 1 Maximum number of PCA coefficients rejected during a given fit iteration. +``pca_min_edges`` int .. 4 Minimum number of edge traces required to perform a PCA decomposition of the trace form. If left_right_pca is True, this minimum applies to the number of left and right traces separately. +``pca_n`` int .. .. The number of PCA components to keep, which must be less than the number of detected traces. If not provided, determined by calculating the minimum number of components required to explain a given percentage of variance in the edge data; see `pca_var_percent`. +``pca_order`` int .. 2 Order of the function fit to the PCA coefficients. +``pca_sigrej`` int, float, list .. 2.0, 2.0 Sigma rejection threshold for fitting PCA components. Individual numbers are used for both lower and upper rejection. A list of two numbers sets these explicitly (e.g., [2., 3.]). +``pca_var_percent`` int, float .. 99.8 The percentage (i.e., not the fraction) of the variance in the edge data accounted for by the PCA used to truncate the number of PCA coefficients to keep (see `pca_n`). Ignored if `pca_n` is provided directly. +``rm_slits`` str, list .. .. Remove one or more user-specified slits. The syntax used to define a slit to remove is: 'det:spec:spat' where det=detector, spec=spectral pixel, spat=spatial pixel. 
For example, '2:2000:2121,3:2000:1500' will remove the slit on detector 2 that contains pixel (spat,spec)=(2000,2121) and on detector 3 that contains pixel (2000,2121). +``smash_range`` list .. 0.0, 1.0 Range of the slit in the spectral direction (in fractional units) to smash when searching for slit edges. If the spectrum covers only a portion of the image, use that range. +``sobel_enhance`` int .. 0 Enhance the sobel filtering? A value of 0 will not enhance the sobel filtering. Any other value > 0 will sum the sobel values. For example, a value of 3 will combine the sobel values for the 3 nearest pixels. This is useful when a slit edge is poorly defined (e.g. vignetted). +``sobel_mode`` str ``nearest``, ``constant`` ``nearest`` Mode for Sobel filtering. Default is 'nearest'; note we find'constant' works best for DEIMOS. +``sync_center`` str ``median``, ``nearest``, ``gap`` ``median`` Mode to use for determining the location of traces to insert. Use `median` to use the median of the matched left and right edge pairs, `nearest` to use the length of the nearest slit, or `gap` to offset by a fixed gap width from the next slit edge. +``sync_predict`` str ``pca``, ``nearest``, ``auto`` ``pca`` Mode to use when predicting the form of the trace to insert. Use `pca` to use the PCA decomposition, `nearest` to reproduce the shape of the nearest trace, or `auto` to let PypeIt decide which mode to use between `pca` and `nearest`. In general, it will first try `pca`, and if that is not possible, it will use `nearest`. +``sync_to_edge`` bool .. True If adding a first left edge or a last right edge, ignore `center_mode` for these edges and place them at the edge of the detector (with the relevant shape). +``trace_median_frac`` int, float .. .. After detection of peaks in the rectified Sobel-filtered image and before refitting the edge traces, the rectified image is median filtered with a kernel width of `trace_median_frac*nspec` along the spectral dimension. 
+``trace_rms_tol`` int, float .. .. After retracing edges using peaks detected in the rectified and collapsed image, the RMS difference (in pixels) between the original and refit traces are calculated. This sets the upper limit of the RMS for traces that will be removed. If None, no limit is set and all new traces are kept. +``trace_thresh`` int, float .. .. After rectification and median filtering of the Sobel-filtered image (see `trace_median_frac`), values in the median-filtered image *below* this threshold are masked in the refitting of the edge trace data. If None, no masking applied. +``trim_spec`` list .. .. User-defined truncation of all slits in the spectral direction.Should be two integers, e.g. 100,150 trims 100 pixels from the short wavelength end and 150 pixels from the long wavelength end of the spectral axis of the detector. +``use_maskdesign`` bool .. False Use slit-mask designs to identify slits. +=========================== ================ =========================================== ============== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== ---- diff --git a/doc/qa.rst b/doc/qa.rst index e6e61396e8..f895fc6003 100644 --- a/doc/qa.rst +++ b/doc/qa.rst @@ -9,9 +9,11 @@ PypeIt QA ========= As part of the standard reduction, PypeIt generates a series -of Quality Assurance (QA) files. This document describes +of fixed-format Quality Assurance (QA) figures. 
This document describes the typical outputs, in the typical order that they appear. +*This page is still a work in progress.* + The basic arrangement is that individual PNG files are created and then a set of HTML files are generated to organize viewing of the PNGs. @@ -20,16 +22,10 @@ viewing of the PNGs. HTML ==== -When the code completes (or crashes out), a set of -HTML files are generated in the ``QA/`` folder. There -is one HTML file per calibration frame set and one -HTML file per science exposure. Example names are -``MF_A.html``. - -Open in your browser and have at 'em. -Quick links are provided to allow one to jump between -the various files. - +When the code completes (or crashes out), an HTML file is generated in the +``QA/`` folder, one per setup that has been reduced (typically one). An example +filename is ``MF_A.html``. These HTML files are out of date, so you're better +off opening the PNG files in the ``PNGs`` directory directly. Calibration QA ============== @@ -39,9 +35,38 @@ to calibration processing. There is a unique one generated for each setup and detector and (possibly) calibration set. -Generally, the title describes the type of QA and the -sub-title indicates the user who ran PypeIt and the -date of the processing. +Generally, the title describes the type of QA plotted. + +.. _qa-order-predict: + +Echelle Order Prediction +------------------------ + +When reducing echelle observations and inserting missing orders, a QA plot is +produced to assess the success of the predicted locations. The example below is +for Keck/HIRES. + +.. figure:: figures/Edges_A_0_MSC01_orders_qa.png + :width: 60% + + Example QA plot showing the measured order spatial widths (blue) and gaps + (green) in pixels. The widths should be nearly constant as a function of + position, whereas the gaps should change monotonically with spatial pixel. + +In the figure above, measured values that are included in the polynomial fit are +shown as filled points. 
The colored lines show the best fit polynomial model +used for the predicted order locations. The fit allows for an iterative +rejection of points; measured widths and gaps that are rejected during the fit +are shown as orange and purple crosses, respectively. The measurements that are +rejected during the fit are not necessarily *removed* as invalid traces, but the +code allows you to identify outlier traces that *will be* removed. None of the +traces in the example image above are identified as outliers; if they exist, +they will be plotted as orange and purple triangles for widths and gaps, +respectively. Missing orders that will be added are included as open squares; +gaps are green, widths are blue. To deal with overlap, "bracketing" orders are +added for the overlap calculation but are removed in the final set of traces; +the title of the plot indicates if bracketing orders are included and the +vertical dashed lines shows the edges of the detector/mosaic. .. _qa-wave-fit: @@ -52,7 +77,7 @@ PypeIt produces plots like the one below showing the result of the wavelength calibration. .. figure:: figures/deimos_arc1d.png - :width: 60 % + :width: 60% An example QA plot for Keck/DEIMOS wavelength calibration. The extracted arc spectrum is shown to the left with arc lines used for the wavelength solution diff --git a/doc/releases/1.16.1dev.rst b/doc/releases/1.16.1dev.rst index 9efeb54374..1a64a67c66 100644 --- a/doc/releases/1.16.1dev.rst +++ b/doc/releases/1.16.1dev.rst @@ -21,9 +21,18 @@ Dependency Changes Functionality/Performance Improvements and Additions ---------------------------------------------------- +- Added the ``max_overlap`` parameter, which limits the set of new order traces + added, to compensate for orders missed during automated edge-tracing, to those + that have less than a given fractional overlap with adjacent orders. 
+- Added the ``order_fitrej`` and ``order_outlier`` parameters used to set the + sigma-clipping threshold used when fitting Legendre functions to the order + widths and gaps. - Added the possibility to decide if the extracted standard star spectrum should be used as a crutch for tracing the object in the science frame (before it was done as default). This is done by setting the parameter ``use_std_trace`` in FindObjPar. +- Now PypeIt can handle the case where "Standard star trace does not match the + number of orders in the echelle data" both in `run_pypeit` and in + `pypeit_coadd_1dspec`. - Now PypeIt can handle the case where "Standard star trace does not match the number of orders in the echelle data" both in `run_pypeit` and in `pypeit_coadd_1dspec`. - Added the functionality to use slitless flats to create pixelflats. Note: new frametype @@ -33,6 +42,7 @@ Functionality/Performance Improvements and Additions - Added a functionality that allows, when multiple frames are combined, to scale each frame to have the same mean value before combining. To use this functionality, the new parameter ``scale_mean`` should be set to ``True``. +- Added the possibility to use the parameter ``fwhm_fromlines`` also for the tilts calibration. Instrument-specific Updates --------------------------- @@ -109,6 +119,14 @@ Under-the-hood Improvements - Introduced :class:`~pypeit.pypeitdata.PypeItDataPaths` to handle all interactions with the ``pypeit/data`` directory, which provides a unified interface for accessing on-disk and cached files. +- When adding missing orders, the full syncing procedure is no longer performed. + The code now only checks that the edges are still synced after the missed + orders are added. +- When detecting overlapping orders/slits, the code now forces each edge used to + have been directly detected; i.e., if an edge is inserted, the fact that the + resulting slit is abnormally short should not trigger the overlap detection. 
+- Improved the QA plot resulting from fitting order widths and gaps as a + function of spatial position. - Updated general raw image reader so that it correctly accounts for spectrographs that read the data and overscan sections directly from the file headers. diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 63c2d313b8..b2f2e8fd71 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -1176,11 +1176,14 @@ def get_tilts(self): _spat_flexure = self.mstilt.spat_flexure \ if self.par['tiltframe']['process']['spat_flexure_correct'] else None + # get measured fwhm from wv_calib + measured_fwhms = [wvfit.fwhm for wvfit in self.wv_calib.wv_fits] + # Build buildwaveTilts = wavetilts.BuildWaveTilts( self.mstilt, self.slits, self.spectrograph, self.par['tilts'], self.par['wavelengths'], det=self.det, qa_path=self.qa_path, - spat_flexure=_spat_flexure) + spat_flexure=_spat_flexure, measured_fwhms=measured_fwhms) # TODO still need to deal with syntax for LRIS ghosts. Maybe we don't need it self.wavetilts = buildwaveTilts.run(doqa=self.write_qa, show=self.show) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index aea12a2a5e..3b5ff61923 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -223,7 +223,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, Returns ------- - sobjs : :class:`pypeit.specobjs.SpecObjs` + sobjs : :class:`~pypeit.specobjs.SpecObjs` SpecObjs object containing the extracted spectrum """ if whitelight_range is None: diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index d4a816993f..487dd504bf 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -37,7 +37,7 @@ from pypeit.datamodel import DataContainer from pypeit.images.detector_container import DetectorContainer from pypeit.images.mosaic import Mosaic -from pypeit import specobj, specobjs, spec2dobj +from pypeit import specobj, specobjs from pypeit import wavemodel from IPython import 
embed @@ -1395,83 +1395,6 @@ def sky_em_residuals(wave:np.ndarray, flux:np.ndarray, return dwave[m], diff[m], diff_err[m], los[m], los_err[m] -def flexure_diagnostic(file, file_type='spec2d', flexure_type='spec', chk_version=False): - """ - Print the spectral or spatial flexure of a spec2d or spec1d file - - Args: - file (:obj:`str`, `Path`_): - Filename of the spec2d or spec1d file to check - file_type (:obj:`str`, optional): - Type of the file to check. Options are 'spec2d' or 'spec1d'. Default - is 'spec2d'. - flexure_type (:obj:`str`, optional): - Type of flexure to check. Options are 'spec' or 'spat'. Default is - 'spec'. - chk_version (:obj:`bool`, optional): - If True, raise an error if the datamodel version or type check - failed. If False, throw a warning only. Default is False. - - Returns: - :obj:`astropy.table.Table`, :obj:`float`: If file_type is 'spec2d' and - flexure_type is 'spec', return a table with the spectral flexure. If - file_type is 'spec2d' and flexure_type is 'spat', return the spatial - flexure. If file_type is 'spec1d', return a table with the spectral - flexure. 
- """ - - # value to return - return_flex = None - - if file_type == 'spec2d': - # load the spec2d file - allspec2D = spec2dobj.AllSpec2DObj.from_fits(file, chk_version=chk_version) - # Loop on Detectors - for det in allspec2D.detectors: - print('') - print('=' * 50 + f'{det:^7}' + '=' * 51) - # get and print the spectral flexure - if flexure_type == 'spec': - spec_flex = allspec2D[det].sci_spec_flexure - spec_flex.rename_column('sci_spec_flexure', 'global_spec_shift') - if np.all(spec_flex['global_spec_shift'] != None): - spec_flex['global_spec_shift'].format = '0.3f' - # print the table - spec_flex.pprint_all() - # return the table - return_flex = spec_flex - # get and print the spatial flexure - if flexure_type == 'spat': - spat_flex = allspec2D[det].sci_spat_flexure - # print the value - print(f'Spat shift: {spat_flex}') - # return the value - return_flex = spat_flex - elif file_type == 'spec1d': - # no spat flexure in spec1d file - if flexure_type == 'spat': - msgs.error("Spat flexure not available in the spec1d file, try with a spec2d file") - # load the spec1d file - sobjs = specobjs.SpecObjs.from_fitsfile(file, chk_version=chk_version) - spec_flex = Table() - spec_flex['NAME'] = sobjs.NAME - spec_flex['global_spec_shift'] = sobjs.FLEX_SHIFT_GLOBAL - if np.all(spec_flex['global_spec_shift'] != None): - spec_flex['global_spec_shift'].format = '0.3f' - spec_flex['local_spec_shift'] = sobjs.FLEX_SHIFT_LOCAL - if np.all(spec_flex['local_spec_shift'] != None): - spec_flex['local_spec_shift'].format = '0.3f' - spec_flex['total_spec_shift'] = sobjs.FLEX_SHIFT_TOTAL - if np.all(spec_flex['total_spec_shift'] != None): - spec_flex['total_spec_shift'].format = '0.3f' - # print the table - spec_flex.pprint_all() - # return the table - return_flex = spec_flex - - return return_flex - - # TODO -- Consider separating the methods from the DataContainer as per calibrations class MultiSlitFlexure(DataContainer): """ diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py 
index 7957785ef0..c826c444a9 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -1090,7 +1090,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, Parameters ---------- sciimg : `numpy.ndarray`_ - science image, usually with a global sky subtracted. + Science image, usually with a global sky subtracted. shape = (nspec, nspat) sciivar : `numpy.ndarray`_ inverse variance of science image. @@ -1181,9 +1181,9 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, fullbkpt = bset.breakpoints force_gauss : bool, default = False - If True, a Gaussian profile will always be assumed for the - optimal extraction using the FWHM determined from object finding (or provided by the user) for the spatial - profile. + If True, a Gaussian profile will always be assumed for the optimal + extraction using the FWHM determined from object finding (or provided by + the user) for the spatial profile. sn_gauss : int or float, default = 4.0 The signal to noise threshold above which optimal extraction with non-parametric b-spline fits to the objects spatial diff --git a/pypeit/core/trace.py b/pypeit/core/trace.py index 350518ecef..225363ed09 100644 --- a/pypeit/core/trace.py +++ b/pypeit/core/trace.py @@ -1647,7 +1647,41 @@ def predicted_center_difference(lower_spat, spat, width_fit, gap_fit): return np.absolute(spat - test_spat) -def extrapolate_orders(cen, width_fit, gap_fit, min_spat, max_spat, tol=0.01): +def extrapolate_prev_order(cen, width_fit, gap_fit, tol=0.01): + """ + Predict the location of the order to the spatial left (lower spatial pixel + numbers) of the order center provided. + + This is largely a support function for :func:`extrapolate_orders`. + + Args: + cen (:obj:`float`): + Center of the known order. + width_fit (:class:`~pypeit.core.fitting.PypeItFit`): + Model of the order width as a function of the order center. 
+ gap_fit (:class:`~pypeit.core.fitting.PypeItFit`): + Model of the order gap *after* each order as a function of the order + center. + tol (:obj:`float`, optional): + Tolerance used when optimizing the order locations predicted toward + lower spatial pixels. + + Returns: + :obj:`float`: Predicted center of the previous order. + """ + # Guess the position of the previous order + w = width_fit.eval(cen) + guess = np.array([cen - w - gap_fit.eval(cen)]) + # Set the bounds based on this guess and the expected order width + bounds = optimize.Bounds(lb=guess - w/2, ub=guess + w/2) + # Optimize the spatial position + res = optimize.minimize(predicted_center_difference, guess, + args=(cen, width_fit, gap_fit), + method='trust-constr', jac='2-point', bounds=bounds, tol=tol) + return res.x[0] + + +def extrapolate_orders(cen, width_fit, gap_fit, min_spat, max_spat, tol=0.01, bracket=False): """ Predict the locations of additional orders by extrapolation. @@ -1676,6 +1710,9 @@ def extrapolate_orders(cen, width_fit, gap_fit, min_spat, max_spat, tol=0.01): tol (:obj:`float`, optional): Tolerance used when optimizing the order locations predicted toward lower spatial pixels. + bracket (:obj:`bool`, optional): + Bracket the added orders with one additional order on either side. + This can be useful for dealing with predicted overlap. 
Returns: :obj:`tuple`: Two arrays with orders centers (1) below the first and (2) @@ -1686,16 +1723,7 @@ def extrapolate_orders(cen, width_fit, gap_fit, min_spat, max_spat, tol=0.01): # Extrapolate toward lower spatial positions lower_spat = [cen[0]] while lower_spat[-1] > min_spat: - # Guess the position of the previous order - l = width_fit.eval(lower_spat[-1]) - guess = np.array([lower_spat[-1] - l - gap_fit.eval(lower_spat[-1])]) - # Set the bounds based on this guess and the expected order width - bounds = optimize.Bounds(lb=guess - l/2, ub=guess + l/2) - # Optimize the spatial position - res = optimize.minimize(predicted_center_difference, guess, - args=(lower_spat[-1], width_fit, gap_fit), - method='trust-constr', jac='2-point', bounds=bounds, tol=tol) - lower_spat += [res.x[0]] + lower_spat += [extrapolate_prev_order(lower_spat[-1], width_fit, gap_fit, tol=tol)] # Extrapolate toward larger spatial positions upper_spat = [cen[-1]] @@ -1703,8 +1731,12 @@ def extrapolate_orders(cen, width_fit, gap_fit, min_spat, max_spat, tol=0.01): upper_spat += [upper_spat[-1] + width_fit.eval(upper_spat[-1]) + gap_fit.eval(upper_spat[-1])] - # Return arrays after removing the first and last spatial position (which - # are either repeats of values in `cen` or outside the spatial range) + # Return arrays after removing the first spatial position because it is a + # repeat of the input. If not bracketting, also remove the last point + # because it will be outside the minimum or maximum spatial position (set by + # min_spat, max_spat). 
+ if bracket: + return np.array(lower_spat[-1:0:-1]), np.array(upper_spat[1:]) return np.array(lower_spat[-2:0:-1]), np.array(upper_spat[1:-1]) diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py index 65518173d3..be6d75d9fe 100644 --- a/pypeit/edgetrace.py +++ b/pypeit/edgetrace.py @@ -877,11 +877,21 @@ def auto_trace(self, bpm=None, debug=False, show_stages=False): if not self.is_empty and self.par['add_missed_orders']: # Refine the order traces self.order_refine(debug=debug) - # Check that the edges are still sinked (not overkill if orders are - # missed) - self.success = self.sync() - if not self.success: - return + # Check that the edges are still synced + if not self.is_synced: + msgs.error('Traces are no longer synced after adding in missed orders.') + +# KBW: Keep this code around for a while. It is the old code that +# resynced the edges just after adding in new orders. Nominally, this +# shouldn't be necessary, but the comment suggests this may be +# necessary if orders are missed. We should keep this around until +# we're sure it's not needed. +# # Check that the edges are still sinked (not overkill if orders are +# # missed) +# self.success = self.sync(debug=True) +# if not self.success: +# return + if show_stages: self.show(title='After adding in missing orders') @@ -2612,10 +2622,19 @@ def check_synced(self, rebuild_pca=False): # ' changes along the dispersion direction.') # self.remove_traces(dl_flag, rebuild_pca=_rebuild_pca) - # Get the slits that have been flagged as abnormally short. This should - # be the same as the definition above, it's just redone here to ensure - # `short` is defined when `length_rtol` is None. - short = self.fully_masked_traces(flag='ABNORMALSLIT_SHORT') + # Try to detect overlap between adjacent slits by finding abnormally + # short slits. + # - Find abnormally short slits that *do not* include inserted edges; + # i.e., these must be *detected* edges, not inserted ones. 
+ # - *Both* edges in the fit must be flagged because of this + # requirement that the trace not be inserted. This means that we + # set mode='neither' when running synced_selection. I also set + # assume_synced=True: the traces should be synced if the code has + # made it this far. Any flags that would indicate otherwise will + # have been set by this function. + short = self.fully_masked_traces(flag='ABNORMALSLIT_SHORT', + exclude=self.bitmask.insert_flags) + short = self.synced_selection(short, mode='neither', assume_synced=True) if self.par['overlap'] and np.any(short): msgs.info('Assuming slits flagged as abnormally short are actually due to ' 'overlapping slit edges.') @@ -2819,7 +2838,7 @@ def synced_selection(self, indx, mode='ignore', assume_synced=False): if not assume_synced and not self.is_synced: msgs.error('To synchronize the trace selection, it is expected that the traces have ' - 'been left-right synchronized. Either run sync() to sychronize, ignore ' + 'been left-right synchronized. 
Either run sync() to sychronize or ignore ' 'the synchronization (which may raise an exception) by setting ' 'assume_synced=True.') if mode == 'both': @@ -4090,7 +4109,7 @@ def sync(self, rebuild_pca=True, debug=False): if debug: msgs.info('Show instance includes inserted traces but before checking the sync.') - self.show(flag='any') + self.show(title='includes inserted traces before checking the sync', flag='any') # Check the full synchronized list and log completion of the # method @@ -4100,6 +4119,7 @@ def sync(self, rebuild_pca=True, debug=False): i += 1 if i == maxiter: msgs.error('Fatal left-right trace de-synchronization error.') + if self.log is not None: self.log += [inspect.stack()[0][3]] return True @@ -4271,7 +4291,7 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud if loc.size != ntrace: msgs.error('Number of sides does not match the number of insertion locations.') - msgs.info('Inserting {0} new traces.'.format(ntrace)) + msgs.info(f'Inserting {ntrace} new traces.') # Nudge the traces if nudge: @@ -4855,13 +4875,17 @@ def order_refine(self, debug=False): add_left, add_right = self.order_refine_fixed_format(reference_row, debug=debug) rmtraces = None else: + # TODO: `bracket` is hard-coded! Currently I expect we always want + # to set bracket=True, but we should plan to revisit this and maybe + # expose as a user parameter. + bracket = True add_left, add_right, rmtraces \ - = self.order_refine_free_format(reference_row, debug=debug) + = self.order_refine_free_format(reference_row, bracket=bracket, debug=debug) if add_left is None or add_right is None: msgs.info('No additional orders found to add') return - + if rmtraces is not None: self.remove_traces(rmtraces, rebuild_pca=True) @@ -4915,12 +4939,58 @@ def order_refine_fixed_format(self, reference_row, debug=False): return add_left_edges, add_right_edges - # NOTE: combined_order_tol is effectively hard-coded. 
- def order_refine_free_format(self, reference_row, combined_order_tol=1.8, debug=False): + # NOTE: combined_order_tol is effectively hard-coded; i.e., the current code + # always uses the default when calling this function. + def order_refine_free_format(self, reference_row, combined_order_tol=1.8, bracket=True, + debug=False): """ Refine the order locations for "free-format" Echelles. - Traces must be synced before reaching here. + Traces must be synced before calling this function. + + The procedure is as follows: + + - The function selects the good traces and calculates the width of + each order and the gap between each order and fits them with + Legendre polynomials (using the polynomial orders set by the + ``order_width_poly`` and ``order_gap_poly`` parameters); 5-sigma + outliers are removed from the fit. + + - Based on this fit, the code adds missed orders, both interspersed + with detected orders and extrapolated over the full spatial range + of the detector/mosaic. The spatial extent over which this + prediction is performed is set by ``order_spat_range`` and can be + limited by any resulting overlap in the prediction, as set by + ``max_overlap``. + + - Any detected "orders" that are actually the adjoining of one or + more orders are flagged for rejection. + + Args: + reference_row (:obj:`int`): + The index of the spectral pixel (row) in the set of left and + right traces at which to predict the positions of the missed + orders. Nominally, this is the reference row used for the + construction of the trace PCA. + combined_order_tol (:obj:`float`, optional): + For orders that are very nearly overlapping, the automated edge + tracing can often miss the right and left edges of two adjacent + orders. This leads to the detected edges of two adjacent orders + being combined into a single order. 
This value sets the maximum + ratio of the width of any given detected order to the polynomial + fit to the order width as a function of spatial position on the + detector. + bracket (:obj:`bool`, optional): + Bracket the added orders with one additional order on either side. + This can be useful for dealing with predicted overlap. + debug (:obj:`bool`, optional): + Run in debug mode. + + Returns: + :obj:`tuple`: Three `numpy.ndarray`_ objects that provide (1,2) the + left and right edges of orders to be added to the set of edge traces + and (3) a boolean array indicating which of the existing traces + should be removed. """ # Select the left/right traces # TODO: This is pulled from get_slits. Maybe want a function for this. @@ -4928,7 +4998,8 @@ def order_refine_free_format(self, reference_row, combined_order_tol=1.8, debug= # Save the list of good left edges in case we need to remove any left_gpm = gpm & self.is_left left = self.edge_fit[:,left_gpm] - right = self.edge_fit[:,gpm & self.is_right] + right_gpm = gpm & self.is_right + right = self.edge_fit[:,right_gpm] # Use the trace locations at the middle of the spectral shape of the # detector/mosaic @@ -4941,45 +5012,59 @@ def order_refine_free_format(self, reference_row, combined_order_tol=1.8, debug= gap = left[1:] - right[:-1] # Create the polynomial models. - # TODO: - # - Expose the rejection parameters to the user? - # - Be more strict with upper rejection, to preferentially ignore - # measurements biased by missing orders and/or combined orders? width_fit = fitting.robust_fit(cen, width, self.par['order_width_poly'], - function='legendre', lower=3., upper=3., maxiter=5, - sticky=True) + function='legendre', lower=self.par['order_fitrej'], + upper=self.par['order_fitrej'], maxiter=5, sticky=True) # Connection of center to gap uses the gap spatially *after* the order. 
gap_fit = fitting.robust_fit(cen[:-1], gap, self.par['order_gap_poly'], - function='legendre', lower=3., upper=3., maxiter=5, - sticky=True) - - # Ideally, measured widths/gaps should be regected for one of the + function='legendre', lower=self.par['order_fitrej'], + upper=self.par['order_fitrej'], maxiter=5, sticky=True) + + # Ideally, measured widths/gaps should be rejected for one of the # following reasons: # - The width is too large because gaps were missed (i.e. multiple # orders were combined) # - The width is too small because order overlap was detected and # removed. # - The gap is too large because orders were missed - # This finds cases where multiple orders have been combined - combined_orders = width / width_fit.eval(cen) > combined_order_tol + + # In the case when the user does not reject "outliers", we still reject + # orders that we expected to be cases where multiple orders have been + # combined + bad_order = width / width_fit.eval(cen) > combined_order_tol + if self.par['order_outlier'] is not None: + # Exclude "outliers" + resid = np.absolute(width_fit.yval - width_fit.eval(width_fit.xval)) + bad_order |= (resid/width_fit.calc_fit_rms() > self.par['order_outlier']) + # TODO: The gaps for HIRES can have *very* large residuals. Using + # the gaps to identify outliers would remove many orders that + # probably shouldn't be removed. +# resid = np.absolute(gap_fit.yval - gap_fit.eval(gap_fit.xval)) +# bad_order[:-1] |= (resid/gap_fit.calc_fit_rms() > self.par['order_outlier']) + # And sets flags used to remove them, in favor of replacing them with # the predicted locations of the individual orders. rmtraces = np.zeros(left_gpm.size, dtype=bool) - rmtraces[np.where(left_gpm)[0][combined_orders]] = True + rmtraces[np.where(left_gpm)[0][bad_order]] = True rmtraces = self.synced_selection(rmtraces, mode='both') # Interpolate any missing orders # TODO: Expose tolerances to the user? 
- individual_orders = np.logical_not(combined_orders) + good_order = np.logical_not(bad_order) order_cen, order_missing \ - = trace.find_missing_orders(cen[individual_orders], width_fit, gap_fit) - - # Extrapolate orders + = trace.find_missing_orders(cen[good_order], width_fit, gap_fit) + if np.sum(order_missing) > order_missing.size // 2: + msgs.warn('Found more missing orders than detected orders. Check the order ' + 'refinement QA file! The code will continue, but you likely need to adjust ' + 'your edge-tracing parameters.') + + # Extrapolate orders; this includes one additional order to either side + # of the spatial extent set by rng. rng = [0., float(self.nspat)] if self.par['order_spat_range'] is None \ else self.par['order_spat_range'] lower_order_cen, upper_order_cen \ - = trace.extrapolate_orders(cen[individual_orders], width_fit, gap_fit, - rng[0], rng[1]) + = trace.extrapolate_orders(cen[good_order], width_fit, gap_fit, + rng[0], rng[1], bracket=bracket) # Combine the results order_cen = np.concatenate((lower_order_cen, order_cen, upper_order_cen)) @@ -4996,123 +5081,328 @@ def order_refine_free_format(self, reference_row, combined_order_tol=1.8, debug= # TODO: Making this directory should probably be done elsewhere if ofile is not None and not ofile.parent.is_dir(): ofile.parent.mkdir(parents=True) - self.order_refine_free_format_qa(cen, combined_orders, width, gap, width_fit, gap_fit, - order_cen, order_missing, ofile=ofile) + self.order_refine_free_format_qa(cen, bad_order, width, gap, width_fit, gap_fit, + order_cen, order_missing, bracket=bracket, ofile=ofile) # Return the coordinates for the left and right edges to add add_width = width_fit.eval(order_cen[order_missing]) - add_left, add_right = order_cen[order_missing] - add_width / 2, order_cen[order_missing] + add_width / 2 + add_left = order_cen[order_missing] - add_width / 2 + add_right = order_cen[order_missing] + add_width / 2 # Join the added edges with the existing ones - _left = 
np.append(add_left, left[individual_orders]) + _left = np.append(add_left, left[good_order]) # Create a sorting vector srt = np.argsort(_left) # Create a vector that will reverse the sorting isrt = np.argsort(srt) # Join and sort the right edges - _right = np.append(add_right, right[individual_orders])[srt] + _right = np.append(add_right, right[good_order])[srt] # Sort the left edges _left = _left[srt] + # NOTE: Although I haven't tested this, I think this approach works best + # under the assumption that the overlap *decreases* from small pixel + # numbers to large pixel numbers. This should be true if the pypeit + # convention is maintained with blue orders toward small pixel values + # and red orders at large pixel values. + # Deal with overlapping orders among the ones to be added. The edges # are adjusted equally on both sides to avoid changing the order center # and exclude the overlap regions from the reduction. - if np.any(_left[1:] - _right[:-1] < 0): - # Loop sequentially so that each pair is updated as the loop progresses - for i in range(1, _left.size): - # *Negative* of the gap; i.e., positives values means there's - # overlap - ngap = _right[i-1] - _left[i] - if ngap > 0: - # Adjust both order edges to avoid the overlap region but - # keep the same center coordinate - _left[i-1] += ngap - _right[i-1] -= ngap - _left[i] += ngap - _right[i] -= ngap + if np.all(_left[1:] - _right[:-1] > 0): + if bracket: + add_left, add_right = self._handle_bracketing_orders(add_left, add_right) + # There is no overlap, so just return the orders to add + return add_left, add_right, rmtraces + + # Used to remove orders that have too much overlap + nord = _left.size + ok_overlap = np.ones(nord, dtype=bool) + + # Loop sequentially so that each pair is updated as the loop progresses + for i in range(1, nord): + # *Negative* of the gap; i.e., positives values means there's + # overlap + ngap = _right[i-1] - _left[i] + if ngap > 0: + if self.par['max_overlap'] is not None: + 
ok_overlap[i-1] = 2*ngap/(_right[i-1] - _left[i-1]) < self.par['max_overlap'] + ok_overlap[i] = 2*ngap/(_right[i] - _left[i]) < self.par['max_overlap'] + # Adjust both order edges to avoid the overlap region but + # keep the same center coordinate + _left[i-1] += ngap + _right[i-1] -= ngap + _left[i] += ngap + _right[i] -= ngap + + # For any *existing* traces that were adjusted because of the overlap, + # this applies the adjustment to the `edge_fit` data. + # NOTE: This only adjusts the "fit" locations (edge_fit), *not* the + # measured centroid locations (edge_cen). This should not cause + # problems because, e.g., the `get_slits` function uses `edge_fit`. + nadd = add_left.size + left_indx = np.where(left_gpm)[0][good_order] + offset = _left[isrt][nadd:] - left + self.edge_fit[:,left_indx] += offset[None,:] + right_indx = np.where(right_gpm)[0][good_order] + offset = _right[isrt][nadd:] - right + self.edge_fit[:,right_indx] += offset[None,:] # Get the adjusted traces to add. Note this currently does *not* change # the original traces - return _left[isrt][:add_left.size], _right[isrt][:add_right.size], rmtraces - - def order_refine_free_format_qa(self, cen, combined_orders, width, gap, width_fit, gap_fit, - order_cen, order_missing, ofile=None): + ok_overlap = ok_overlap[isrt][:nadd] + add_left = _left[isrt][:nadd][ok_overlap] + add_right = _right[isrt][:nadd][ok_overlap] + + if bracket: + add_left, add_right = self._handle_bracketing_orders(add_left, add_right) + return add_left, add_right, rmtraces + + @staticmethod + def _handle_bracketing_orders(add_left, add_right): """ - QA plot for order placements + Utility function to remove added orders that bracket the left and right + edge of the detector, used to handle overlap. + + Args: + add_left (`numpy.ndarray`_): + List of left edges to add + add_right (`numpy.ndarray`_): + List of right edges to add + + Returns: + :obj:`tuple`: The two `numpy.ndarray`_ objects after removing the + bracketing orders. 
""" + nadd = add_left.size + if nadd < 2: + # TODO: The code should not get here! If it does, we need to + # figure out why and fix it. + msgs.error('CODING ERROR: Order bracketing failed!') + if nadd == 2: + return None, None + return add_left[1:-1], add_right[1:-1] + + def order_refine_free_format_qa(self, cen, bad_order, width, gap, width_fit, gap_fit, + order_cen, order_missing, bracket=False, ofile=None): + """ + Create the QA plot for order modeling. + + Args: + cen (`numpy.ndarray`_): + Spatial centers of the detected orders. + bad_order (`numpy.ndarray`_): + Boolean array selecting "orders" that have been flagged as + outliers. + width (`numpy.ndarray`_): + Measured order spatial widths in pixels. + gap (`numpy.ndarray`_): + Measured order gaps in pixels. + width_fit (:class:`~pypeit.core.fitting.PypeItFit`): + Model of the order width as a function of the order center. + gap_fit (:class:`~pypeit.core.fitting.PypeItFit`): + Model of the order gap *after* each order as a function of the order + center. + order_cen (`numpy.ndarray`_): + Spatial centers of all "individual" orders. + order_missing (`numpy.ndarray`_): + Boolean array selecting "individual" orders that were not traced + by the automated tracing and flagged as missing. See + :func:`~pypeit.core.trace.find_missing_orders` and + :func:`~pypeit.core.trace.extrapolate_orders`. + bracket (:obj:`bool`, optional): + Flag that missing orders have been bracketed by additional + orders in an attempt to deal with overlap regions. + ofile (:obj:`str`, `Path`_, optional): + Path for the QA figure file. If None, the plot is shown in a + matplotlib window. 
+ """ + # Setup + w_resid = width - width_fit.eval(cen) + w_rms = width_fit.calc_fit_rms() + med_wr = np.median(w_resid) + mad_wr = np.median(np.absolute(w_resid - med_wr)) + + w_out = bad_order & width_fit.gpm.astype(bool) + w_rej = np.logical_not(bad_order) & np.logical_not(width_fit.gpm) + w_outrej = bad_order & np.logical_not(width_fit.gpm) + w_good = np.logical_not(w_out | w_rej | w_outrej) + + g_cen = cen[:-1] + g_bad_order = bad_order[:-1] + g_resid = gap - gap_fit.eval(g_cen) + g_rms = gap_fit.calc_fit_rms() + med_gr = np.median(g_resid) + mad_gr = np.median(np.absolute(g_resid - med_gr)) + + g_out = g_bad_order & gap_fit.gpm.astype(bool) + g_rej = np.logical_not(g_bad_order) & np.logical_not(gap_fit.gpm) + g_outrej = g_bad_order & np.logical_not(gap_fit.gpm) + g_good = np.logical_not(g_out | g_rej | g_outrej) + + # Set the spatial limits based on the extent of the order centers and/or + # the detector spatial extent + sx = min(0, np.amin(order_cen)) + ex = max(self.nspat, np.amax(order_cen)) + buf = 1.1 + xlim = [(sx * (1 + buf) + ex * (1 - buf))/2, (sx * (1 - buf) + ex * (1 + buf))/2] + + # Set the residual plot limits based on the median and median absolute + # deviation + width_lim = np.array([med_wr - 20*mad_wr, med_wr + 20*mad_wr]) + gap_lim = np.array([med_gr - 20*mad_gr, med_gr + 20*mad_gr]) + + # Sample the width and gap models over the full width of the plots + mod_cen = np.linspace(*xlim, 100) + mod_width = width_fit.eval(mod_cen) + mod_gap = gap_fit.eval(mod_cen) + + # Create the plot w,h = plt.figaspect(1) fig = plt.figure(figsize=(1.5*w,1.5*h)) - ax = fig.add_axes([0.15, 0.35, 0.8, 0.6]) + # Plot the data and each fit + ax = fig.add_axes([0.10, 0.35, 0.8, 0.6]) ax.minorticks_on() ax.tick_params(which='both', direction='in', top=True, right=True) ax.grid(True, which='major', color='0.7', zorder=0, linestyle='-') - ax.set_xlim([0, self.nspat]) + ax.set_xlim(xlim) ax.xaxis.set_major_formatter(ticker.NullFormatter()) - # TODO: Do something similar 
for the gaps? - if np.any(combined_orders): - individual_orders = np.logical_not(combined_orders) - ax.scatter(cen[combined_orders], width[combined_orders], - marker='^', color='C1', s=80, lw=0, label='combined orders flag', - zorder=3) - ax.scatter(cen[individual_orders], width[individual_orders], - marker='.', color='C0', s=50, lw=0, label='measured widths', - zorder=3) - else: - ax.scatter(cen, width, - marker='.', color='C0', s=50, lw=0, label='measured widths', - zorder=3) + # Set the plot title + title = 'Order prediction model' + if bracket: + title += ' (bracketed)' + ax.text(0.5, 1.02, title, ha='center', va='center', transform=ax.transAxes) + + # Plot the detector bounds + ax.axvline(0, color='k', ls='--', lw=2) + ax.axvline(self.nspat, color='k', ls='--', lw=2) + + # Models + ax.plot(mod_cen, mod_width, color='C0', alpha=0.3, lw=3, zorder=3) + ax.plot(mod_cen, mod_gap, color='C2', alpha=0.3, lw=3, zorder=3) + + # Measurements included in the fit + ax.scatter(cen[w_good], width[w_good], + marker='.', color='C0', s=50, lw=0, label='fitted widths', zorder=4) + if np.any(w_rej): + # Rejected but not considered an outlier + ax.scatter(cen[w_rej], width[w_rej], + marker='x', color='C1', s=50, lw=1, label='rej widths', zorder=4) + if np.any(w_out): + # Outlier but not rejected + ax.scatter(cen[w_out], width[w_out], + marker='^', facecolor='none', edgecolor='C1', s=50, lw=1, + label='outlier widths', zorder=4) + if np.any(w_outrej): + # Both outlier and rejected + ax.scatter(cen[w_outrej], width[w_outrej], + marker='^', facecolor='C1', s=50, lw=1, label='rej,outlier widths', zorder=4) + # Orders to add ax.scatter(order_cen[order_missing], width_fit.eval(order_cen[order_missing]), - marker='x', color='C0', s=80, lw=1, label='missing widths', zorder=3) - ax.plot(order_cen, width_fit.eval(order_cen), color='C0', alpha=0.3, lw=3, zorder=2) - ax.scatter(cen[:-1], gap, marker='.', color='C2', s=50, lw=0, label='measured gaps', - zorder=3) + marker='s', 
facecolor='none', edgecolor='C0', s=80, lw=1, + label='missing widths', zorder=3) + + # Same as above but for gaps + ax.scatter(g_cen[g_good], gap[g_good], + marker='.', color='C2', s=50, lw=0, label='fitted gaps', zorder=4) + if np.any(g_rej): + ax.scatter(g_cen[g_rej], gap[g_rej], + marker='x', color='C4', s=50, lw=1, label='rej gaps', zorder=4) + if np.any(g_out): + ax.scatter(g_cen[g_out], gap[g_out], + marker='^', facecolor='none', edgecolor='C4', s=50, lw=1, + label='outlier gaps', zorder=4) + if np.any(g_outrej): + ax.scatter(g_cen[g_outrej], gap[g_outrej], + marker='^', facecolor='C4', s=50, lw=1, label='rej,outlier gaps', zorder=4) ax.scatter(order_cen[order_missing], gap_fit.eval(order_cen[order_missing]), - marker='x', color='C2', s=80, lw=1, label='missing gaps', zorder=3) - ax.plot(order_cen, gap_fit.eval(order_cen), color='C2', alpha=0.3, lw=3, zorder=2) + marker='s', facecolor='none', edgecolor='C2', s=80, lw=1, + label='missing gaps', zorder=3) + + # Add the y label and legend ax.set_ylabel('Order Width/Gap [pix]') ax.legend() - width_resid = width - width_fit.eval(cen) - med_wr = np.median(width_resid) - mad_wr = np.median(np.absolute(width_resid - med_wr)) - width_lim = [med_wr - 10*mad_wr, med_wr + 10*mad_wr] - - gap_resid = gap - gap_fit.eval(cen[:-1]) - med_gr = np.median(gap_resid) - mad_gr = np.median(np.absolute(gap_resid - med_gr)) - gap_lim = [med_gr - 10*mad_gr, med_gr + 10*mad_gr] - - ax = fig.add_axes([0.15, 0.25, 0.8, 0.1]) + # Plot the width residuals + ax = fig.add_axes([0.10, 0.25, 0.8, 0.1]) ax.minorticks_on() - ax.tick_params(which='both', direction='in', top=True, right=True) - ax.grid(True, which='major', color='0.7', zorder=0, linestyle='-') - ax.set_xlim([0, self.nspat]) + ax.tick_params(which='both', direction='in', top=True, right=False) + ax.set_xlim(xlim) ax.set_ylim(width_lim) ax.xaxis.set_major_formatter(ticker.NullFormatter()) - if np.any(combined_orders): - ax.scatter(cen[combined_orders], width_resid[combined_orders], 
- marker='^', color='C1', s=80, lw=0, zorder=3) - ax.scatter(cen[individual_orders], width_resid[individual_orders], - marker='.', color='C0', s=50, lw=0, zorder=3) - else: - ax.scatter(cen, width_resid, marker='.', color='C0', s=50, lw=0, zorder=3) + + # Plot the detector bounds + ax.axvline(0, color='k', ls='--', lw=2) + ax.axvline(self.nspat, color='k', ls='--', lw=2) + + # Model is at 0 residual ax.axhline(0, color='C0', alpha=0.3, lw=3, zorder=2) + # Measurements included in the fit + ax.scatter(cen[w_good], w_resid[w_good], marker='.', color='C0', s=50, lw=0, zorder=4) + # Rejected but not considered an outlier + ax.scatter(cen[w_rej], w_resid[w_rej], marker='x', color='C1', s=50, lw=1, zorder=4) + # Outlier but not rejected + ax.scatter(cen[w_out], w_resid[w_out], + marker='^', facecolor='none', edgecolor='C1', s=50, lw=1, zorder=4) + # Both outlier and rejected + ax.scatter(cen[w_outrej], w_resid[w_outrej], + marker='^', facecolor='C1', s=50, lw=1, zorder=4) + + # Add the label ax.set_ylabel(r'$\Delta$Width') - ax = fig.add_axes([0.15, 0.15, 0.8, 0.1]) + # Add a right axis that gives the residuals normalized by the rms; use + # this to set the grid. 
+ axt = ax.twinx() + axt.minorticks_on() + axt.tick_params(which='both', direction='in') + axt.grid(True, which='major', color='0.7', zorder=0, linestyle='-') + axt.set_xlim(xlim) + axt.set_ylim(width_lim / w_rms) + axt.set_ylabel(r'$\Delta$/RMS') + + # Plot the gap residuals + ax = fig.add_axes([0.10, 0.15, 0.8, 0.1]) ax.minorticks_on() - ax.tick_params(which='both', direction='in', top=True, right=True) - ax.grid(True, which='major', color='0.7', zorder=0, linestyle='-') - ax.set_xlim([0, self.nspat]) + ax.tick_params(which='both', direction='in', top=True, right=False) + ax.set_xlim(xlim) ax.set_ylim(gap_lim) - ax.scatter(cen[:-1], gap_resid, marker='.', color='C2', s=50, lw=0, zorder=3) + + # Plot the detector bounds + ax.axvline(0, color='k', ls='--', lw=2) + ax.axvline(self.nspat, color='k', ls='--', lw=2) + + # Model is at 0 residual ax.axhline(0, color='C2', alpha=0.3, lw=3, zorder=2) + # Measurements included in the fit + ax.scatter(g_cen[g_good], g_resid[g_good], + marker='.', color='C2', s=50, lw=0, zorder=4) + # Rejected but not considered an outlier + ax.scatter(g_cen[g_rej], g_resid[g_rej], + marker='x', color='C4', s=50, lw=1, zorder=4) + # Outlier but not rejected + ax.scatter(g_cen[g_out], g_resid[g_out], + marker='^', facecolor='none', edgecolor='C4', s=50, lw=1, zorder=4) + # Both outlier and rejected + ax.scatter(g_cen[g_outrej], g_resid[g_outrej], + marker='^', facecolor='C4', s=50, lw=1, zorder=4) + + # Add the axis labels ax.set_ylabel(r'$\Delta$Gap') - ax.set_xlabel('Spatial pixel') + # Add a right axis that gives the residuals normalized by the rms; use + # this to set the grid. 
+ axt = ax.twinx() + axt.minorticks_on() + axt.tick_params(which='both', direction='in') + axt.grid(True, which='major', color='0.7', zorder=0, linestyle='-') + axt.set_xlim(xlim) + axt.set_ylim(gap_lim / g_rms) + axt.set_ylabel(r'$\Delta$/RMS') + if ofile is None: plt.show() else: diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index b11735ce51..e97b39825e 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -1900,6 +1900,7 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_ flatpar = deepcopy(self.par['flatfield']) # do not tweak the slits flatpar['tweak_slits'] = False + flatpar['slit_illum_finecorr'] = False pixelFlatField = FlatField(slitless_pixel_flat, self.spectrograph, flatpar, slits, wavetilts=None, wv_calib=None, slitless=True, qa_path=self.qa_path) diff --git a/pypeit/images/combineimage.py b/pypeit/images/combineimage.py index 99fb4f1629..31980b00b5 100644 --- a/pypeit/images/combineimage.py +++ b/pypeit/images/combineimage.py @@ -37,7 +37,6 @@ class CombineImage: rawImages (:obj:`list`): A list of one or more :class:`~pypeit.images.rawimage.RawImage` objects to be combined. - """ def __init__(self, rawImages, par): if not isinstance(par, pypeitpar.ProcessImagesPar): diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 502f4e026d..a19680b156 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -2978,15 +2978,16 @@ def __init__(self, reference=None, method=None, echelle=None, ech_nspec_coeff=No defaults['fwhm'] = 4. dtypes['fwhm'] = [int, float] descr['fwhm'] = 'Spectral sampling of the arc lines. This is the FWHM of an arcline in ' \ - 'binned pixels of the input arc image' + 'binned pixels of the input arc image. Note that this is used also in the ' \ + 'wave tilts calibration.' defaults['fwhm_fromlines'] = True dtypes['fwhm_fromlines'] = bool descr['fwhm_fromlines'] = 'Estimate spectral resolution in each slit using the arc lines. 
'\ - 'If True, the estimated FWHM will override ``fwhm`` only in '\ + 'If True, the estimated FWHM will override ``fwhm`` in '\ 'the determination of the wavelength solution (including the ' \ 'calculation of the threshold for the solution RMS, see ' \ - '``rms_thresh_frac_fwhm``), but not for the wave tilts calibration.' \ + '``rms_thresh_frac_fwhm``), and ALSO for the wave tilts calibration.' \ defaults['fwhm_spat_order'] = 0 dtypes['fwhm_spat_order'] = int @@ -3265,9 +3266,10 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha minimum_slit_dlength=None, dlength_range=None, minimum_slit_length=None, minimum_slit_length_sci=None, length_range=None, minimum_slit_gap=None, clip=None, order_match=None, order_offset=None, add_missed_orders=None, order_width_poly=None, - order_gap_poly=None, order_spat_range=None, overlap=None, use_maskdesign=None, - maskdesign_maxsep=None, maskdesign_step=None, maskdesign_sigrej=None, pad=None, - add_slits=None, add_predict=None, rm_slits=None, maskdesign_filename=None): + order_gap_poly=None, order_fitrej=None, order_outlier=None, order_spat_range=None, + overlap=None, max_overlap=None, use_maskdesign=None, maskdesign_maxsep=None, + maskdesign_step=None, maskdesign_sigrej=None, pad=None, add_slits=None, + add_predict=None, rm_slits=None, maskdesign_filename=None): # Grab the parameter names and values from the function # arguments @@ -3651,11 +3653,12 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha descr['add_missed_orders'] = 'For any Echelle spectrograph (fixed-format or otherwise), ' \ 'attempt to add orders that have been missed by the ' \ 'automated edge tracing algorithm. For *fixed-format* ' \ - 'Echelles, this is based on the expected positions on ' \ + 'echelles, this is based on the expected positions on ' \ 'on the detector. 
Otherwise, the detected orders are ' \ - 'modeled and roughly used to predict the locations of ' \ - 'missed orders; see additional parameters ' \ - '``order_width_poly``, ``order_gap_poly``, and ' \ + 'modeled and used to predict the locations of missed ' \ + 'orders; see additional parameters ' \ + '``order_width_poly``, ``order_gap_poly``, ' \ + '``order_fitrej``, ``order_outlier``, and ' \ '``order_spat_range``.' defaults['order_width_poly'] = 2 @@ -3670,6 +3673,24 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha 'gap between orders as a function of the order spatial ' \ 'position. See ``add_missed_orders``.' + defaults['order_fitrej'] = 3. + dtypes['order_fitrej'] = [int, float] + descr['order_fitrej'] = 'When fitting the width of and gap between echelle orders with ' \ + 'Legendre polynomials, this is the sigma-clipping threshold ' \ + 'when excluding data from the fit. See ``add_missed_orders``.' + + defaults['order_outlier'] = None + dtypes['order_outlier'] = [int, float] + descr['order_outlier'] = 'When fitting the width of echelle orders with Legendre ' \ + 'polynomials, this is the sigma-clipping threshold used to ' \ + 'identify outliers. Orders clipped by this threshold are ' \ + '*removed* from further consideration, whereas orders clipped ' \ + 'by ``order_fitrej`` are excluded from the polynomial fit ' \ + 'but are not removed. Note this is *only applied to the order ' \ + 'widths*, not the order gaps. If None, no "outliers" are ' \ + 'identified/removed. Should be larger than or equal to ' \ + '``order_fitrej``.' + dtypes['order_spat_range'] = list descr['order_spat_range'] = 'The spatial range of the detector/mosaic over which to ' \ 'predict order locations. If None, the full ' \ @@ -3684,6 +3705,19 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha 'the code attempts to convert the short slits into slit gaps. This ' \ 'is particularly useful for blue orders in Keck-HIRES data.' 
+ defaults['max_overlap'] = None + dtypes['max_overlap'] = float + descr['max_overlap'] = 'When adding missing echelle orders based on where existing ' \ + 'orders are found, the prediction can yield overlapping orders. ' \ + 'The edges of these orders are adjusted to eliminate the ' \ + 'overlap, and orders can be added up over the spatial range of ' \ + 'the detector set by ``order_spat_range``. If this value is ' \ + 'None, orders are added regardless of how much they overlap. ' \ + 'If not None, this defines the maximum fraction of an order ' \ + 'spatial width that can overlap with other orders. For example, ' \ + 'if ``max_overlap=0.5``, any order that overlaps its neighboring ' \ + 'orders by more than 50% will not be added as a missing order.' + defaults['use_maskdesign'] = False dtypes['use_maskdesign'] = bool descr['use_maskdesign'] = 'Use slit-mask designs to identify slits.' @@ -3789,9 +3823,10 @@ def from_dict(cls, cfg): 'bound_detector', 'minimum_slit_dlength', 'dlength_range', 'minimum_slit_length', 'minimum_slit_length_sci', 'length_range', 'minimum_slit_gap', 'clip', 'order_match', 'order_offset', 'add_missed_orders', 'order_width_poly', - 'order_gap_poly', 'order_spat_range', 'overlap', 'use_maskdesign', - 'maskdesign_maxsep', 'maskdesign_step', 'maskdesign_sigrej', - 'maskdesign_filename', 'pad', 'add_slits', 'add_predict', 'rm_slits'] + 'order_gap_poly', 'order_fitrej', 'order_outlier', 'order_spat_range','overlap', + 'max_overlap', 'use_maskdesign', 'maskdesign_maxsep', 'maskdesign_step', + 'maskdesign_sigrej', 'maskdesign_filename', 'pad', 'add_slits', 'add_predict', + 'rm_slits'] badkeys = np.array([pk not in parkeys for pk in k]) if np.any(badkeys): @@ -3829,6 +3864,10 @@ def validate(self): if not self['auto_pca'] and self['sync_predict'] == 'pca': warnings.warn('sync_predict cannot be pca if auto_pca is False. 
Setting to nearest.') self['sync_predict'] = 'nearest' + if self['max_overlap'] is not None and (self['max_overlap'] < 0 or self['max_overlap'] > 1): + msgs.error('If defined, max_overlap must be in the range [0,1].') + if self['order_outlier'] is not None and self['order_outlier'] < self['order_fitrej']: + msgs.warn('Order outlier threshold should not be less than the rejection threshold.') class WaveTiltsPar(ParSet): diff --git a/pypeit/scripts/chk_flexure.py b/pypeit/scripts/chk_flexure.py index d384dc8e29..74faf77b01 100644 --- a/pypeit/scripts/chk_flexure.py +++ b/pypeit/scripts/chk_flexure.py @@ -15,10 +15,13 @@ def get_parser(cls, width=None): parser = super().get_parser(description='Print QA on flexure to the screen', width=width) - parser.add_argument('input_file', type=str, nargs='+', help='One or more PypeIt spec2d or spec1d file') + parser.add_argument('input_file', type=str, nargs='+', + help='One or more PypeIt spec2d or spec1d file') inp = parser.add_mutually_exclusive_group(required=True) - inp.add_argument('--spec', default=False, action='store_true', help='Check the spectral flexure') - inp.add_argument('--spat', default=False, action='store_true', help='Check the spatial flexure') + inp.add_argument('--spec', default=False, action='store_true', + help='Check the spectral flexure') + inp.add_argument('--spat', default=False, action='store_true', + help='Check the spatial flexure') parser.add_argument('--try_old', default=False, action='store_true', help='Attempt to load old datamodel versions. A crash may ensue..') return parser @@ -29,9 +32,11 @@ def main(args): from IPython import embed from astropy.io import fits from pypeit import msgs - from pypeit.core import flexure + from pypeit import specobjs + from pypeit import spec2dobj chk_version = not args.try_old + flexure_type = 'spat' if args.spat else 'spec' # Loop over the input files for in_file in args.input_file: @@ -41,23 +46,21 @@ def main(args): # What kind of file are we?? 
hdul = fits.open(in_file) head0 = hdul[0].header - file_type = None + if 'PYP_CLS' in head0.keys() and head0['PYP_CLS'].strip() == 'AllSpec2DObj': - file_type = 'spec2d' + # load the spec2d file + allspec2D = spec2dobj.AllSpec2DObj.from_fits(in_file, chk_version=chk_version) + allspec2D.flexure_diagnostics(flexure_type=flexure_type) elif 'DMODCLS' in head0.keys() and head0['DMODCLS'].strip() == 'SpecObjs': - file_type = 'spec1d' + if flexure_type == 'spat': + msgs.error("Spat flexure not available in the spec1d file, try with a " + "spec2d file") + # load the spec1d file + sobjs = specobjs.SpecObjs.from_fitsfile(in_file, chk_version=chk_version) + sobjs.flexure_diagnostics() else: msgs.error("Bad file type input!") - # Check the flexure - flexure.flexure_diagnostic(in_file, file_type=file_type, flexure_type='spat' if args.spat else 'spec', - chk_version=chk_version) - # space between files for clarity print('') - - - - - diff --git a/pypeit/scripts/chk_tilts.py b/pypeit/scripts/chk_tilts.py index 95f1d2ded2..baddabc85f 100644 --- a/pypeit/scripts/chk_tilts.py +++ b/pypeit/scripts/chk_tilts.py @@ -32,14 +32,18 @@ def get_parser(cls, width=None): @staticmethod def main(args): + from pathlib import Path from pypeit import wavetilts chk_version = not args.try_old + # tilts file path + file = Path(args.file).absolute() + # Load - tilts = wavetilts.WaveTilts.from_file(args.file, chk_version=chk_version) + tilts = wavetilts.WaveTilts.from_file(file, chk_version=chk_version) tilts.show(in_ginga=np.logical_not(args.mpl), show_traces=args.show_traces, - chk_version=chk_version) + calib_dir=file.parent, chk_version=chk_version) diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index f4827f4168..79be6ec845 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -175,7 +175,7 @@ def main(args): debug=args.debug, show_stages=args.show, qa_path=qa_path) - print('Tracing for detector {0} finished in {1} s.'.format(det, 
time.perf_counter()-t)) + msgs.info(f'Tracing for detector {det} finished in { time.perf_counter()-t:.1f} s.') # Write the two calibration frames edges.to_file() edges.get_slits().to_file() diff --git a/pypeit/spec2dobj.py b/pypeit/spec2dobj.py index 6aaae67396..5e693e44c0 100644 --- a/pypeit/spec2dobj.py +++ b/pypeit/spec2dobj.py @@ -728,3 +728,45 @@ def __repr__(self): txt += ') >' return txt + def flexure_diagnostics(self, flexure_type='spat'): + """ + Print and return the spectral or spatial flexure of a spec2d file. + + Args: + flexure_type (:obj:`str`, optional): + Type of flexure to check. Options are 'spec' or 'spat'. Default + is 'spat'. + + Returns: + :obj:`dict`: Dictionary with the flexure values for each detector. If + flexure_type is 'spec', the spectral flexure is stored in an astropy table. + If flexure_type is 'spat', the spatial flexure is stored in a float. + + """ + if flexure_type not in ['spat', 'spec']: + msgs.error(f'flexure_type must be spat or spec, not {flexure_type}') + return_flex = {} + # Loop on Detectors + for det in self.detectors: + print('') + print('=' * 50 + f'{det:^7}' + '=' * 51) + # get and print the spectral flexure + if flexure_type == 'spec': + spec_flex = self[det].sci_spec_flexure + spec_flex.rename_column('sci_spec_flexure', 'global_spec_shift') + if np.all(spec_flex['global_spec_shift'] != None): + spec_flex['global_spec_shift'].format = '0.3f' + # print the table + spec_flex.pprint_all() + # return the table + return_flex[det] = spec_flex + # get and print the spatial flexure + if flexure_type == 'spat': + spat_flex = self[det].sci_spat_flexure + # print the value + print(f'Spat shift: {spat_flex}') + # return the value + return_flex[det] = spat_flex + + return return_flex + diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 1093687079..85151e1663 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -1057,6 +1057,29 @@ def get_extraction_groups(self, model_full_slit=False) -> List[List[int]]: return 
groups + def flexure_diagnostics(self): + """ + Print and return the spectral flexure of a spec1d file. + + Returns: + :obj:`astropy.table.Table`: Table with the spectral flexure. + """ + spec_flex = Table() + spec_flex['NAME'] = self.NAME + spec_flex['global_spec_shift'] = self.FLEX_SHIFT_GLOBAL + if np.all(spec_flex['global_spec_shift'] != None): + spec_flex['global_spec_shift'].format = '0.3f' + spec_flex['local_spec_shift'] = self.FLEX_SHIFT_LOCAL + if np.all(spec_flex['local_spec_shift'] != None): + spec_flex['local_spec_shift'].format = '0.3f' + spec_flex['total_spec_shift'] = self.FLEX_SHIFT_TOTAL + if np.all(spec_flex['total_spec_shift'] != None): + spec_flex['total_spec_shift'].format = '0.3f' + # print the table + spec_flex.pprint_all() + # return the table + return spec_flex + #TODO Should this be a classmethod on specobjs?? def get_std_trace(detname, std_outfile, chk_version=True): diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index ca223a39e2..969b6b3079 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -282,9 +282,14 @@ def build_fwhmimg(self, tilts, slits, initial=False, spat_flexure=None): # Generate the slit mask and slit edges - pad slitmask by 1 for edge effects slitmask = slits.slit_img(pad=1, initial=initial, flexure=spat_flexure) slits_left, slits_right, _ = slits.select_edges(initial=initial, flexure=spat_flexure) + # need to exclude slits that are masked (are bad) + bad_slits = slits.bitmask.flagged(slits.mask, and_not=slits.bitmask.exclude_for_reducing) + ok_spat_ids = slits.spat_id[np.logical_not(bad_slits)] # Build a map of the spectral FWHM fwhmimg = np.zeros(tilts.shape) for sl, spat_id in enumerate(slits.spat_id): + if spat_id not in ok_spat_ids: + continue this_mask = slitmask == spat_id spec, spat = np.where(this_mask) spat_loc = (spat - slits_left[spec, sl]) / (slits_right[spec, sl] - slits_left[spec, sl]) @@ -328,13 +333,11 @@ def build_waveimg(self, tilts, slits, spat_flexure=None, spec_flexure=None): 
spec_flex /= (slits.nspec - 1) # Setup - #ok_slits = slits.mask == 0 -# bpm = slits.mask.astype(bool) -# bpm &= np.logical_not(slits.bitmask.flagged(slits.mask, flag=slits.bitmask.exclude_for_reducing)) bpm = slits.bitmask.flagged(slits.mask, and_not=slits.bitmask.exclude_for_reducing) ok_slits = np.logical_not(bpm) # image = np.zeros_like(tilts) + # Grab slit_img slitmask = slits.slit_img(flexure=spat_flexure, exclude_flag=slits.bitmask.exclude_for_reducing) # Separate detectors for the 2D solutions? @@ -342,9 +345,7 @@ def build_waveimg(self, tilts, slits, spat_flexure=None, spec_flexure=None): # Error checking if self.det_img is None: msgs.error("This WaveCalib object was not generated with ech_separate_2d=True") - # Grab slit_img - slit_img = slits.slit_img() - + # Unpack some 2-d fit parameters if this is echelle for islit in np.where(ok_slits)[0]: slit_spat = slits.spat_id[islit] @@ -354,9 +355,7 @@ def build_waveimg(self, tilts, slits, spat_flexure=None, spec_flexure=None): if self.par['echelle'] and self.par['ech_2dfit']: # evaluate solution -- if self.par['ech_separate_2d']: - ordr_det = slits.det_of_slit( - slit_spat, self.det_img, - slit_img=slit_img) + ordr_det = slits.det_of_slit(slit_spat, self.det_img, slit_img=slitmask) # There are ways for this to go sour.. 
# if the seperate solutions are not aligned with the detectors # or if one reruns with a different number of detectors diff --git a/pypeit/wavetilts.py b/pypeit/wavetilts.py index 2b24d0005f..e0949b9d99 100644 --- a/pypeit/wavetilts.py +++ b/pypeit/wavetilts.py @@ -25,6 +25,7 @@ from pypeit.display import display from pypeit.core import arc from pypeit.core import tracewave +from pypeit.core.wavecal import autoid from pypeit.images import buildimage @@ -174,7 +175,7 @@ def spatid_to_zero(self, spat_id): return np.where(mtch)[0][0] def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, - chk_version=True): + calib_dir=None, chk_version=True): """ Show in ginga or mpl Tiltimg with the tilts traced and fitted overlaid @@ -188,6 +189,9 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, If True, show the image in ginga. Otherwise, use matplotlib. show_traces (bool, optional): If True, show the traces of the tilts on the image. + calib_dir (`Path`_): + Path to the calibration directory. If None, the path is taken from the + WaveTilts object. chk_version (:obj:`bool`, optional): When reading in existing files written by PypeIt, perform strict version checking to ensure a valid file. If False, the code @@ -195,7 +199,14 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, failures. User beware! 
""" # get tilt_img_dict - cal_file = Path(self.calib_dir).absolute() / self.tiltimg_filename + _calib_dir = self.calib_dir + if calib_dir is not None and calib_dir.exists(): + _calib_dir = calib_dir + msgs.info(f'Searching for other calibration files in {str(_calib_dir)}') + else: + msgs.info(f'Searching for other calibration files in the default directory {str(_calib_dir)}') + + cal_file = Path(_calib_dir).absolute() / self.tiltimg_filename if cal_file.exists(): tilt_img_dict = buildimage.TiltImage.from_file(cal_file, chk_version=chk_version) else: @@ -203,7 +214,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, # get slits slitmask = None - cal_file = Path(self.calib_dir).absolute() / self.slits_filename + cal_file = Path(_calib_dir).absolute() / self.slits_filename if cal_file.exists(): slits = slittrace.SlitTraceSet.from_file(cal_file, chk_version=chk_version) _slitmask = slits.slit_img(initial=True, flexure=self.spat_flexure) @@ -220,7 +231,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, # get waveimg same_size = (slits.nspec, slits.nspat) == tilt_img_dict.image.shape if waveimg is None and slits is not None and same_size and in_ginga: - wv_calib_name = wavecalib.WaveCalib.construct_file_name(self.calib_key, calib_dir=self.calib_dir) + wv_calib_name = wavecalib.WaveCalib.construct_file_name(self.calib_key, calib_dir=_calib_dir) if Path(wv_calib_name).absolute().exists(): wv_calib = wavecalib.WaveCalib.from_file(wv_calib_name, chk_version=chk_version) tilts = self.fit2tiltimg(slitmask, flexure=self.spat_flexure) @@ -268,24 +279,32 @@ class BuildWaveTilts: slits (:class:`~pypeit.slittrace.SlitTraceSet`): Slit edges spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): - Spectrograph object + The `Spectrograph` instance that sets the instrument used. Used to set + :attr:`spectrograph`. 
par (:class:`~pypeit.par.pypeitpar.WaveTiltsPar` or None): - The parameters used to fuss with the tilts + The parameters used for the tilt calibration. + Uses ``['calibrations']['tilts']``. wavepar (:class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` or None): - The parameters used for the wavelength solution - det (int): Detector index + The parameters used for the wavelength solution. + Uses ``['calibrations']['wavelengths']``. + det (int): + Detector index qa_path (:obj:`str`, optional): Directory for QA output. spat_flexure (float, optional): If input, the slitmask and slit edges are shifted prior to tilt analysis. + measured_fwhms (`numpy.ndarray`_, optional): + FWHM of the arc lines measured during wavecalib. If provided, this + will be used for arc/sky line detection. + Attributes: spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): - ?? + The `Spectrograph` instance that sets the instrument used. steps (list): - ?? + List of the processing steps performed mask (`numpy.ndarray`_): boolean array; True = Ignore this slit all_trcdict (list): @@ -304,7 +323,7 @@ class BuildWaveTilts: # TODO This needs to be modified to take an inmask def __init__(self, mstilt, slits, spectrograph, par, wavepar, det=1, qa_path=None, - spat_flexure=None): + spat_flexure=None, measured_fwhms=None): # TODO: Perform type checking self.spectrograph = spectrograph @@ -316,6 +335,7 @@ def __init__(self, mstilt, slits, spectrograph, par, wavepar, det=1, qa_path=Non self.det = det self.qa_path = qa_path self.spat_flexure = spat_flexure + self.measured_fwhms = measured_fwhms if measured_fwhms is not None else np.array([None] * slits.nslits) # Get the non-linear count level if self.mstilt.is_mosaic: @@ -385,23 +405,26 @@ def extract_arcs(self): return arccen, arccen_bpm - def find_lines(self, arcspec, slit_cen, slit_idx, bpm=None, debug=False): + def find_lines(self, arcspec, slit_cen, slit_idx, fwhm, bpm=None, debug=False): """ Find the lines for tracing Wrapper to 
tracewave.tilts_find_lines() Args: - arcspec (): - ?? - slit_cen (): - ?? + arcspec (`numpy.ndarray`_): + 1D spectrum to be searched for significant detections. + slit_cen (`numpy.ndarray`_): + Trace down the center of the slit. Must match the shape of arcspec. slit_idx (int): - Slit index, zero-based + Slit index, zero-based. + fwhm (float): + FWHM of the arc lines. bpm (`numpy.ndarray`_, optional): - ?? + Bad-pixel mask for input spectrum. If None, all pixels considered good. + If passed, must match the shape of arcspec. debug (bool, optional): - ?? + Show a QA plot for the line detection. Returns: tuple: 2 objectcs @@ -423,7 +446,7 @@ def find_lines(self, arcspec, slit_cen, slit_idx, bpm=None, debug=False): sig_neigh=self.par['sig_neigh'], nfwhm_neigh=self.par['nfwhm_neigh'], only_these_lines=only_these_lines, - fwhm=self.wavepar['fwhm'], + fwhm=fwhm, nonlinear_counts=self.nonlinear_counts, bpm=bpm, debug_peaks=False, debug_lines=debug) @@ -478,7 +501,7 @@ def fit_tilts(self, trc_tilt_dict, thismask, slit_cen, spat_order, spec_order, s self.steps.append(inspect.stack()[0][3]) return self.all_fit_dict[slit_idx]['coeff2'] - def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, + def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, fwhm, debug_pca=False, show_tracefits=False): """ Trace the tilts @@ -500,6 +523,12 @@ def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, is (nspec, nspat) with dtype=bool. slit_cen (:obj:`int`): Integer index indicating the slit in question. + fwhm (:obj:`float`): + FWHM of the arc lines. + debug_pca (:obj:`bool`, optional): + Show the PCA modeling QA plots. + show_tracefits (:obj:`bool`, optional): + Show the trace fits. 
Returns: dict: Dictionary containing information on the traced tilts required @@ -507,7 +536,7 @@ def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, """ trace_dict = tracewave.trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, - inmask=self.gpm, fwhm=self.wavepar['fwhm'], + inmask=self.gpm, fwhm=fwhm, spat_order=self.par['spat_order'], maxdev_tracefit=self.par['maxdev_tracefit'], sigrej_trace=self.par['sigrej_trace'], @@ -561,11 +590,13 @@ def model_arc_continuum(self, debug=False): for i in range(nslits): if self.tilt_bpm[i]: continue + # get FWHM for this slit + fwhm = autoid.set_fwhm(self.wavepar, measured_fwhm=self.measured_fwhms[i]) # TODO: What to do with the following iter_continuum parameters?: # sigthresh, sigrej, niter_cont, cont_samp, cont_frac_fwhm arc_continuum[:,i], arc_fitmask[:,i] \ = arc.iter_continuum(self.arccen[:,i], gpm=np.invert(self.arccen_bpm[:,i]), - fwhm=self.wavepar['fwhm']) + fwhm=fwhm) # TODO: Original version. Please leave it for now. 
# arc_fitmask[:,i], coeff \ # = utils.robust_polyfit_djs(spec, self.arccen[:,i], self.par['cont_order'], @@ -677,6 +708,7 @@ def run(self, doqa=True, debug=False, show=False): # Subtract arc continuum _mstilt = self.mstilt.image.copy() if self.par['rm_continuum']: + msgs.info('Subtracting the continuum') continuum = self.model_arc_continuum(debug=debug) _mstilt -= continuum if debug: @@ -713,11 +745,13 @@ def run(self, doqa=True, debug=False, show=False): self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue msgs.info(f'Computing tilts for slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})') + # Get the arc FWHM for this slit + fwhm = autoid.set_fwhm(self.wavepar, measured_fwhm=self.measured_fwhms[slit_idx], verbose=True) # Identify lines for tracing tilts msgs.info('Finding lines for tilt analysis') self.lines_spec, self.lines_spat \ = self.find_lines(self.arccen[:,slit_idx], self.slitcen[:,slit_idx], - slit_idx, + slit_idx, fwhm, bpm=self.arccen_bpm[:,slit_idx], debug=debug) if self.lines_spec is None: @@ -733,7 +767,7 @@ def run(self, doqa=True, debug=False, show=False): # each line. msgs.info('Trace the tilts') self.trace_dict = self.trace_tilts(_mstilt, self.lines_spec, self.lines_spat, - thismask, self.slitcen[:, slit_idx]) + thismask, self.slitcen[:, slit_idx], fwhm) # IF there are < 2 usable arc lines for tilt tracing, PCA fit does not work and the reduction crushes # TODO investigate why some slits have <2 usable arc lines if np.sum(self.trace_dict['use_tilt']) < 2: