diff --git a/CHANGES.rst b/CHANGES.rst
index cff1077..9278397 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,78 @@
+2.3.0
+-----
+
+Critical fixes
+^^^^^^^^^^^^^^
+
+- `graphics.arrows` actually works now (again?).
+
+- `image.analysis.azavg` bug fix for raps parameter as an integer.
+
+New features
+^^^^^^^^^^^^
+
+- `catalogs`
+  - `brightest` to sort out bright sources from a catalog.
+  - `faintest` to sort out faint sources from a catalog.
+  - `find_offset` to determine the offset between two catalogs.
+  - `nearest_match` to find neighbors in two lists.
+  - `project_catalog` to project RA, Dec onto the image plane.
+
+- `image`
+  - `analysis.anphot`, `apphot`, `bgphot` allow multiple sources.
+  - `analysis.apphot_by_wcs` for aperture photometry by coordinates.
+  - `analysis.find` for rudimentary source finding.
+  - `core.imshift` allows whole-pixel shifts.
+  - `core.rebin` handles scale factor 1 by special case.
+  - `process.align_by_centroid` and `align_by_wcs` for image
+    alignment.
+
+- `observing`
+  - `Observer.finding_chart` for creating a finding chart with DS9.
+  - `plot_transit_time` for plotting transit times.
+
+- `NEATM.fit` for least-squares fitting of a spectrum.
+
+- New `photometry` module, with lots of Hale-Bopp filter support in
+  the `hb` submodule.
+
+- `scripts/`
+  - `ephemeris` may now output coma flux estimates, and accepts kernel
+    file names from the command line.
+  - New `transit` script for generating plots of transit times.
+
+- `util` functions
+  - `gaussfit` for Gaussian fitting.
+  - `glfit` for Gaussian + linear function fitting.
+  - `stat_avg` for array binning, considering measurement
+    uncertainties.
+  - `write_table` for quick writing of an astropy table with a simple
+    header.
+  - `xyz2lb` to convert Cartesian coordinates to angles.
+
+Other improvements
+^^^^^^^^^^^^^^^^^^
+
+- `calib.filter_trans` modified to use np.loadtxt.
+
+- `catalogs.spatial_match` and `triangles` overhauls.
+
+- `comet.m2afrho` updated, but still experimental.
+
+- `graphics.niceplot` keyword arguments to prevent changes to line
+  widths, marker sizes, and marker edge widths.
+
+- `image`
+  - `analysis.gcentroid` uses float when passed a float.
+  - `process.fixpix` behind-the-scenes improvements, and fixing is
+    now limited by area.
+
+- `observing.Observer` includes date in string representation.
+
+- `util`
+  - `getrot` fix for current astropy.io.fits behavior.
+  - `planckfit` fix for when leastsq refuses to fit the data.
+
 2.2.4
 -----
diff --git a/README.rst b/README.rst
index 31a04fc..a754651 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@ mskpy
 
 MSK's personal Python library, mostly for astronomy work.
 
-Requires: numpy, scipy, astropy v0.3.
+Requires: numpy, scipy, astropy v1.0.
 
 Recommended: pyspice, matplotlib.
 
diff --git a/mskpy/__init__.py b/mskpy/__init__.py
index 3ebb850..917e554 100644
--- a/mskpy/__init__.py
+++ b/mskpy/__init__.py
@@ -20,6 +20,7 @@
    modeling - For fitting models to data.
    models - Surface and dust models.
    observing - Tools for observing preparations.
+   photometry - Tools for photometry.
    polarimetry - Classes and functions for polarimetry.
    util - Grab bag of utility functions.
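To illustrate the new `catalogs` routines introduced above, a minimal
matching sketch with synthetic data (not part of the diff; the offsets
and tolerances are arbitrary)::

    import numpy as np
    from mskpy.catalogs import brightest, nearest_match, find_offset

    # two synthetic catalogs: 2xN (y, x) positions; the second is
    # offset by (+5, -3) pixels
    cat0 = np.random.uniform(0, 1024, (2, 200))
    flux = np.random.uniform(1, 100, 200)
    cat1 = cat0 + np.array([[5.0], [-3.0]])

    b0 = brightest(cat0, flux, 50)
    b1 = brightest(cat1, flux, 50)
    matches, dyx = nearest_match(b0, b1, tol=10.0)
    dy, dx = find_offset(b0, b1, matches)
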
diff --git a/mskpy/calib.py b/mskpy/calib.py
index 50450e2..69f68d9 100644
--- a/mskpy/calib.py
+++ b/mskpy/calib.py
@@ -254,10 +254,10 @@ def filter_trans(name):
     except KeyError:
         raise KeyError("filter {} cannot be found.".format(name.lower()))
 
-    table = ascii.read(_filterdir + '/' + fil[0])
+    table = np.loadtxt(_filterdir + '/' + fil[0]).T
     cols = fil[1]
-    w = table.columns[cols[0]].data * u.um
-    tr = table.columns[cols[1]].data
+    w = table[cols[0]] * u.um
+    tr = table[cols[1]]
 
     return w, tr
 
@@ -323,3 +328,8 @@ def cohen_standard(star, unit=u.Unit('W/(m2 um)')):
     fl = fl.to(unit, equivalencies=equiv)
 
     return wave, fl
+
+# update module docstring
+from .util import autodoc
+autodoc(globals())
+del autodoc
diff --git a/mskpy/catalogs.py b/mskpy/catalogs.py
index 7d668d0..ef70c86 100644
--- a/mskpy/catalogs.py
+++ b/mskpy/catalogs.py
@@ -7,34 +7,324 @@
 .. autosummary::
    :toctree: generated/
 
-   spatial_match
+   brightest
+   faintest
+   find_offset
+   project_catalog
+   nearest_match
    triangles
+   triangle_match
 
 """
 
 __all__ = [
-    'spatial_match',
-    'triangles'
+    'brightest',
+    'faintest',
+    'find_offset',
+    'project_catalog',
+    'nearest_match',
+    'triangles',
+    'triangle_match',
 ]
 
 import numpy as np
 
-def spatial_match(cat0, cat1, tol=0.01, min_score=0, full_output=False,
-                  verbose=True):
-    """Find spatially matching sourse between two lists.
+def brightest(cat0, flux, n, full_output=False):
+    """Return the n brightest objects in the catalog.
+
+    Parameters
+    ----------
+    cat0 : array
+      2xN array of positions.
+    flux : array
+      N-element array of object brightness.
+    n : int
+      Return the brightest `n` objects.
+    full_output : bool, optional
+      Return optional output.
+
+    Returns
+    -------
+    cat : ndarray
+    i : ndarray
+
+    """
+    i = np.argsort(flux)[::-1][:n]
+    if full_output:
+        return np.array(cat0)[:, i], i
+    else:
+        return np.array(cat0)[:, i]
+
+def faintest(cat0, flux, n, full_output=False):
+    """Return the n faintest objects in the catalog.
+
+    Parameters
+    ----------
+    cat0 : array
+      2xN array of positions.
+    flux : array
+      N-element array of object brightness.
+    n : int
+      Return the faintest `n` objects.
+    full_output : bool, optional
+      Return optional output.
+
+    Returns
+    -------
+    cat : ndarray
+    i : ndarray
+
+    """
+    i = np.argsort(flux)[:n]
+    if full_output:
+        return np.array(cat0)[:, i], i
+    else:
+        return np.array(cat0)[:, i]
+
+def find_offset(cat0, cat1, matches, tol=3.0):
+    """Find the offset between two catalogs, given matched stars.
+
+    The matched star list may have false matches.
+
+    Parameters
+    ----------
+    cat0, cat1 : array
+      2xN array of positions.
+    matches : dict
+      The best match for star `i` of `cat0` is `matches[i]` in `cat1`.
+    tol : float
+      The distance tolerance.
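+      Offsets are histogrammed in bins of width `2 * tol`, and only
+      matches within `tol` of the histogram peak are kept.  [pixels]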
+
+    Returns
+    -------
+    dy, dx : float
+
+    """
+    from .util import midstep, meanclip
+
+    d = cat0[:, matches.keys()] - cat1[:, matches.values()]
+    bins = (np.arange(d[0].min() - 2 * tol, d[0].max() + 2 * tol, 2 * tol),
+            np.arange(d[1].min() - 2 * tol, d[1].max() + 2 * tol, 2 * tol))
+    h, edges = np.histogramdd(d.T, bins=bins)
+    i = np.unravel_index(h.argmax(), h.shape)
+    peak = midstep(edges[0])[i[0]], midstep(edges[1])[i[1]]
+    i = np.prod(np.abs(d.T - peak) < tol, 1, dtype=bool)
+    j = meanclip(d[0, i], full_output=True)[2]
+    k = meanclip(d[1, i], full_output=True)[2]
+    i = i[list(set(np.r_[j, k]))]
+    return d[:, i].mean(1)
+
+def project_catalog(cat, wcs=None):
+    """Project a catalog onto the image plane.
+
+    The default is the tangent image plane, but any WCS transformation
+    can be used.
+
+    Parameters
+    ----------
+    cat : astropy SkyCoord, Quantity, or Angle
+      The coordinate list.  If a `Quantity` or `Angle`, `cat` must be
+      a 2xN array: (lat, long).
+    wcs : astropy.wcs.WCS, optional
+      The world coordinate system object for transformation.  The
+      default assumes the tangent plane centered on the latitude
+      and longitude of the catalog.
+
+    Returns
+    -------
+    flat_cat : ndarray
+      A 2xN array of pixel positions, (y, x).
+
+    """
+
+    from astropy.wcs import WCS
+    from astropy.coordinates import SkyCoord
+
+    if not isinstance(cat, SkyCoord):
+        assert len(cat) == 2
+        cat = SkyCoord(ra=cat[1], dec=cat[0])
+
+    if wcs is None:
+        wcs = WCS(naxis=2)
+        wcs.wcs.crpix = (0, 0)
+        ra0 = (cat.ra.max() - cat.ra.min()) / 2.0 + cat.ra.min()
+        dec0 = (cat.dec.max() - cat.dec.min()) / 2.0 + cat.dec.min()
+        wcs.wcs.crval = (ra0.value, dec0.value)
+        wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
+        wcs.wcs.cdelt = np.array([-1, 1]) / 3600.0
+
+    x, y = cat.to_pixel(wcs)
+    return np.vstack((y, x))
+
+def nearest_match(cat0, cat1, tol=1.0, **kwargs):
+    """Find nearest neighbors in two lists.
+
+    Parameters
+    ----------
+    cat0, cat1 : arrays
+      Each catalog is a 2xN array of (y, x) positions.
+    tol : float, optional
+      The radial match tolerance in pixels.
+    **kwargs
+      Keyword arguments for `meanclip` when computing mean offsets.
+
+    Returns
+    -------
+    matches : dictionary
+      The best match for star `i` of `cat0` is `matches[i]` in `cat1`.
+      Stars that are matched multiple times, or not matched at all,
+      are not returned.
+    dyx : ndarray
+      The (y, x) offsets of the matched stars.  Clipping is done in x
+      and y separately, and only the union of the two clipped sets is
+      returned.
+
+    """
+
+    from scipy.spatial.ckdtree import cKDTree
+    from .util import takefrom, meanclip
+
+    assert len(cat0) == 2
+    assert len(cat1) == 2
+
+    tree = cKDTree(cat0.T)
+    d, i = tree.query(cat1.T)  # 0 of cat1 -> i[0] of cat0
+
+    matches = dict()
+    for k, j in enumerate(i):
+        if d[k] < tol:
+            matches[j] = k
+
+    dyx = cat0[:, matches.keys()] - cat1[:, matches.values()]
+    mcx = meanclip(dyx[1], full_output=True, **kwargs)
+    mcy = meanclip(dyx[0], full_output=True, **kwargs)
+    j = list(set(np.concatenate((mcx[2], mcy[2]))))
+
+    return matches, dyx[:, j]
+
+def triangles(y, x, max_ratio=100, min_sep=0):
+    """Describe all possible triangles in a set of points.
+
+    Following IRAF's ccxymatch, triangles computes:
+
+    * length of the perimeter
+    * ratio of longest to shortest side
+    * cosine of the angle between the longest and shortest side
+    * the direction of the arrangement of the vertices of each
+      triangle
+
+    Parameters
+    ----------
+    y, x : arrays
+      Lists of coordinates.
+    max_ratio : float
+      Maximum ratio of longest to shortest side.  ccxymatch claims 5
+      to 10 is best, but I found better results with 100.
+    min_sep : float
+      The minimum distance between two vertices.
+
+    Returns
+    -------
+    v : array
+      Indices of the `y` and `x` arrays which define the vertices of
+      each triangle (3xN).  The first vertex is opposite side a, the
+      second, b, and the third, c.
+    s : array
+      The shape of each triangle (4xN): perimeter, a/c, cos(beta),
+      orientation.
+
+    """
+
+    from itertools import combinations
+
+    v = np.array(list(combinations(range(len(y)), 3)))
+
+    dy = y[v] - np.roll(y[v], 1, 1)
+    dx = x[v] - np.roll(x[v], 1, 1)
+    sides = np.sqrt(dy**2 + dx**2)
+
+    # numpy magic from
+    # http://stackoverflow.com/questions/10921893/numpy-sorting-a-multidimensional-array-by-a-multidimensional-array/
+    i = np.argsort(sides, 1)[:, ::-1]  # indices of sides a, b, c
+    i = list(np.ogrid[[slice(j) for j in i.shape]][:-1]) + [i]
+    v = v[i]
+    dy = dy[i]
+    dx = dx[i]
+    abc = sides[i]
+
+    a2c = abc[:, 0] / abc[:, 2]
+    i = (a2c < max_ratio) * (abc[:, 2] > min_sep)
+    dx = dx[i]
+    dy = dy[i]
+    abc = abc[i]
+    a2c = a2c[i]
+
+    perimeter = abc.sum(1)
+    cbet = ((abc[:, 0]**2 + abc[:, 2]**2 - abc[:, 1]**2)
+            / (2 * abc[:, 0] * abc[:, 2]))
+    rot = np.sign((x[v[:, 0]] - x[v[:, 2]]) * (y[v[:, 1]] - y[v[:, 0]])
+                  - (x[v[:, 0]] - x[v[:, 1]]) * (y[v[:, 2]] - y[v[:, 0]]))
+    rot = rot[i]
+    v = v[i]
+    shapes = np.c_[perimeter, a2c, cbet, rot]
+
+    return v.T, shapes.T
+
+def triangle_match(cat0, cat1, tol=0.01, a2c_tol=1.0, cbet_tol=0.2,
+                   psig=1.0, pscale=None, min_frac=0, msig=None,
+                   full_output=False, verbose=True, **kwargs):
+    """Find spatially matching sources between two lists.
+
+    Following IRAF's ccxymatch.  Compare the shapes and orientations
+    of triangles within two catalog lists:
+
+    1) For each triangle, find the nearest neighbor in `(a/c,
+       cos(beta))` space, where `a` and `c` are the longest and
+       shortest sides, respectively, and `beta` is the angle between
+       the longest and shortest side (i.e., the opening angle of the
+       vertex opposite side `b`).
+
+    2) If the distance between two triangles in `(a/c, cos(beta))`
+       space is `d`, then ignore triangles with `d > tol`.
+
+    3) Compute the ratio of the matched triangle perimeters, and
+       reject poor matches based on a sigma-clipping algorithm.  This
+       step essentially finds the scale factor between the two
+       catalogs.
+
+    4) Compare matched triangle orientations, and compute the number
+       that match in the clockwise sense.  If this is more than half,
+       then reject those that match in the counter-clockwise sense,
+       otherwise, reject the clockwise matches.
+
+    5) Finally, for each triangle find the actual stars that match.
+       Optionally, remove stars that do not match a given fraction of
+       the time.
+
+    Parameters
+    ----------
+    cat0, cat1 : arrays
+      Each catalog is a 2xN array of (y, x) positions.
+    tol : float, optional
+      The triangle match tolerance in (a/c, cos(beta)) space.
+    a2c_tol : float, optional
+      A tolerance factor for the a/c ratio matching.
+    cbet_tol : float, optional
+      A tolerance factor for the cos(beta) matching.
+    psig : float, optional
+      Clip perimeter ratios at this sigma.
+    pscale : float, optional
+      If set, only consider perimeter ratios near `pscale`.
+    min_frac : float, optional
+      Only return stars with `frac > min_frac`.
+    msig : float, optional
+      Only return stars with matches better than `msig` above the
+      match_matrix background.
     full_output : bool, optional
       Set to `True` to also return `match_matrix`.
     verbose : bool, optional
       Print some feedback for the user.
+    **kwargs
+      `triangles` keyword arguments.
 
     Returns
     -------
@@ -42,88 +332,123 @@
       The best match for star `i` of `cat0` is `matches[i]` in `cat1`.
       Stars that are matched multiple times, not matched at all, or
       with scores less than `min_score` are not returned.
-    score : dictionary
+    frac : dictionary
       Fraction of times star `i` matched star `matches[i]` out of all
       times stars `i` and `matches[i]` were matched to any star.
     match_matrix : ndarray, optional
       If `full_output` is `True`, also return the matrix of all star
       matches.
-
-    Notes
-    -----
-
-    Based on the description of DAOPHOT's catalog matching via
-    triangles at
-    http://ned.ipac.caltech.edu/level5/Stetson/Stetson5_2.html
+    scores : ndarray, optional
+      The scores for each triangle's nearest neighbor, or the radial
+      distance for each nearest-neighbor.
 
     """
 
     from scipy.spatial.ckdtree import cKDTree
+    from .util import takefrom, meanclip
+
+    msig = -100 if msig is None else msig
+
+    assert len(cat0) == 2
+    v0, s0 = triangles(*cat0, **kwargs)
+
+    assert len(cat1) == 2
+    v1, s1 = triangles(*cat1, **kwargs)
 
-    v0, s0 = triangles(*cat0)
-    v1, s1 = triangles(*cat1)
     N0 = len(cat0[0])
     N1 = len(cat1[0])
-    tree = cKDTree(s0)
-    d, i = tree.query(s1)  # nearest matches between triangles
+
+    # Find closest triangles, considering a/c and cos(beta)
+    shape_scale = 1.0 / np.array((a2c_tol, cbet_tol))
+    tree = cKDTree(s0[1:3].T * shape_scale)
+    d, i = tree.query(s1[1:3].T * shape_scale)
+
+    # reject based on tolerance
+    good = d <= tol
+
+    if verbose:
+        print ("""[triangle_match] cat0 = {} triangles, cat1 = {} triangles
+[triangle_match] Best match score = {:.2g}, worst match score = {:.2g}
+[triangle_match] {} triangle pairs at or below given tolerance ({})""").format(
+            v0.shape[1], v1.shape[1], min(d), max(d), sum(d <= tol), tol)
+
+    # reject based on perimeter
+    perimeter_ratios = s1[0] / s0[0, i]
+    if pscale is not None:
+        a = good.sum()
+        for j in range(3):
+            sig = np.sqrt(np.mean((perimeter_ratios[good] - pscale)**2))
+            good *= np.abs(perimeter_ratios - pscale) < sig * psig
+        b = good.sum()
+        if verbose:
+            print ("""[triangle_match] One-time perimeter_ratio sigma clip about {}
+[triangle_match] {} triangles rejected.""").format(pscale, a - b)
+
+    mc = meanclip(perimeter_ratios[good], lsig=psig, hsig=psig,
+                  full_output=True)
+
+    if mc[1] <= 0:
+        print "[triangle_match] Low measured perimeter ratio sigma ({}), skipping perimeter rejection".format(mc[1])
+    else:
+        p_good = (np.abs(perimeter_ratios - mc[0]) / mc[1] <= psig) * good
+        good *= p_good
+
+        if verbose:
+            print ("""[triangle_match] Sigma-clipped perimeter ratios = {} +/- {}
+[triangle_match] {} triangle pairs with perimeter ratios within {} sigma."""
+                   ).format(mc[0], mc[1], p_good.sum(), psig)
+
+    # reject based on orientation of vertices
+    ccw = s0[3, i] == s1[3]
+    cw_count = (~ccw[good]).sum()
+    ccw_count = ccw[good].sum()
+    if ccw_count >= cw_count:
+        good *= ccw
+    else:
+        good *= ~ccw
 
     if verbose:
-        print ("""[spatial_match] cat0 = {} triangles, cat1 = {} triangles
-[spatial_match] Best match score = {:.2g}, worst
match sorce = {:.2g} -[spatial_match] {} triangle pairs at or below given tolerance ({})""".format( - len(v0), len(v1), min(d), max(d), sum(d <= tol), tol)) + print ("""[triangle_match] Orientation matches = {}{} +[triangle_match] Anti-orientation matches = {}{}""").format( + cw_count, " (rejected)" if cw_count <= ccw_count else "", + ccw_count, " (rejected)" if cw_count > ccw_count else "") match_matrix = np.zeros((N0, N1), int) for k, j in enumerate(i): - if d[k] <= tol: - match_matrix[v0[j][0], v1[k][0]] += 1 - match_matrix[v0[j][1], v1[k][1]] += 1 - match_matrix[v0[j][2], v1[k][2]] += 1 + if good[k]: + match_matrix[v0[0][j], v1[0][k]] += 1 + match_matrix[v0[1][j], v1[1][k]] += 1 + match_matrix[v0[2][j], v1[2][k]] += 1 + mc = meanclip(match_matrix, full_output=True) m0 = match_matrix.argmax(1) m1 = match_matrix.argmax(0) matches = dict() - scores = dict() + frac = dict() + rej = 0 for i in range(len(m0)): if i == m1[m0[i]]: + if match_matrix[i, m0[i]] < (mc[0] + msig * mc[1]): + rej += 1 + continue matches[i] = m0[i] peak = match_matrix[i, m0[i]] * 2 total = match_matrix[i, :].sum() + match_matrix[:, m0[i]].sum() - scores[i] = peak / float(total) + frac[i] = peak / float(total) + + if verbose: + print "[triangle_match] {} stars failed the msig test".format(rej) for k in matches.keys(): - if scores[k] < min_score: - del matches[k], scores[k] + if frac[k] < min_frac: + del matches[k], frac[k] + + if verbose: + print "[triangle_match] {} stars matched".format(len(matches)) if full_output: - return matches, scores, match_matrix + return matches, frac, match_matrix, d else: - return matches, scores - -def triangles(y, x): - """Describe all possible triangles in a set of points. - - Parameters - ---------- - y, x : arrays - Lists of coordinates. - - Returns - ------- - v : array - Indices of the `y` and `x` arrays which define the vertices of - each triangle (Nx3). - s : array - The shape of each triangle (Nx2): b/a, c/a - - """ - - from itertools import combinations - - v = np.array(list(combinations(range(len(y)), 3))) - dy = y[v] - np.roll(y[v], 1, 1) - dx = x[v] - np.roll(x[v], 1, 1) - abc = np.sort(np.sqrt(dy**2 + dx**2), 1)[:, ::-1] # lengths of sides abc - shapes = abc[:, 1:] / abc[:, :1] # b/a, c/a + return matches, frac - return v, shapes diff --git a/mskpy/comet.py b/mskpy/comet.py index 85ef6b9..87ce68d 100644 --- a/mskpy/comet.py +++ b/mskpy/comet.py @@ -160,6 +160,12 @@ def fluxd(self, observer, date, wave, rap=1.0 * u.arcsec, return fluxd + def _add_lc_columns(self, lc): + from astropy.table import Column + afrho = self.Afrho1 * lc['rh'].data**self.k + lc.add_column(Column(afrho, name='Afrho', format='{:.4g}')) + return lc + class Comet(SolarSysObject): """A comet. @@ -310,6 +316,9 @@ def fluxd(self, observer, date, wave, rap=1.0 * u.arcsec, return fluxd + def _add_lc_columns(self, lc): + return self.coma._add_lc_columns(lc) + def afrho2fluxd(wave, afrho, rap, geom, sun=None, unit=u.Unit('W/(m2 um)'), bandpass=None): """Convert A(th)frho to flux density. @@ -622,23 +631,23 @@ def fluxd2efrho(wave, flux, rho, geom, Tscale=1.1): I = flux / Om # W/m2/um/sr return I * _rho / B -def m2afrho(m, geom): +def m2afrho(m, g, C=8.5e17, m_sun=-27.1): """Convert JPL/HORIZONS apparent magnitude, m, to Afrho. *** EXPERIMENTAL *** - Based on Comet C/2007 N3 (Lulin) in I-band: - - 3200 cm = 8.49 mag at rh=1.49 AU, Delta=0.49 AU. - -2.5 log10(3200 / c) = 8.49 + 5 * log10(1.49 * 0.49) - c = 4.0e6 cm + Based on a few comets. See MSK's notes. 
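+
+    The conversion implemented below is
+    `Afrho = C (delta / AU)**2 (rh / AU)**2 10**(-0.4 (m - m_sun)) cm`,
+    with `rh` and `delta` taken from `g`.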
     Parameters
     ----------
     m : float
      Comet's apparent magnitude from JPL.
-    geom : dict of Quantity, or ephem.Geom
+    g : dict of Quantity, or ephem.Geom
      The observing circumstances (rh and delta).
+    C : float
+      Conversion constant.  [cm]
+    m_sun : float
+      Apparent magnitude of the Sun.
 
     Returns
     -------
@@ -647,8 +656,9 @@
 
     """
     print " *** EXPERIMENTAL *** "
-    M = m - 5 * np.log10(geom['rh'] * geom['delta'])
-    return 4.0e6 * 10**(M / -2.5)
+    afrho = (C * g['delta'].to(u.au)**2 * g['rh'].to(u.au)**2 / u.au**4
+             * 10**(-0.4 * (m - m_sun))) * u.cm
+    return afrho
 
 def M2afrho1(M1):
     """Convert JPL's absolute magnitude, M1, to Afrho at 1 AU.
diff --git a/mskpy/ephem/ssobj.py b/mskpy/ephem/ssobj.py
index 086a737..4e1d3e4 100644
--- a/mskpy/ephem/ssobj.py
+++ b/mskpy/ephem/ssobj.py
@@ -133,7 +133,8 @@ def v(self, date):
         return self.state.v(date)
 
     def ephemeris(self, observer, dates, num=None, columns=None,
-                  cformats=None, date_format=None, ltt=False, **kwargs):
+                  cformats=None, ra_unit='hourangle', date_format=None,
+                  ltt=False, **kwargs):
         """Ephemeris for an observer.
 
         Parameters
@@ -154,6 +155,8 @@ def ephemeris(self, observer, dates, num=None, columns=None,
         cformats : dict or list, optional
           A dictionary of formats with keys corresponding to
           `columns`.
+        ra_unit : str or astropy Unit, optional
+          The unit of Right Ascension output, e.g., hourangle or deg.
         date_format : function
           A function to format the `date` column before creating the
           table.
@@ -169,11 +172,14 @@ def ephemeris(self, observer, dates, num=None, columns=None,
         `date_format` should be removed when astropy `Table` can
         handle Time objects.
 
+        Override `_add_lc_columns` to add additional columns to
+        lightcurve output.
+
         """
         from astropy.table import Table, Column
         from astropy.units import Quantity
-        from ..util import dh2hms, date2time
+        from ..util import date2time, dh2hms
 
         dates = date2time(dates)
         if num is not None:
@@ -199,8 +205,8 @@ def ephemeris(self, observer, dates, num=None, columns=None,
 
         _cformats = dict(
             date = '{:s}',
-            ra = lambda x: dh2hms(x / 15.0)[:-7],
-            dec = lambda x: dh2hms(x)[:-7],
+            ra = lambda x: dh2hms(x, "{:2d}:{:02d}"),
+            dec = lambda x: dh2hms(x, "{:2d}:{:02d}"),
             lam = '{:.0f}',
             bet = '{:+.0f}',
             rh = '{:.3f}',
@@ -223,8 +229,10 @@ def ephemeris(self, observer, dates, num=None, columns=None,
         for c in columns:
             if c == 'date':
                 data = [date_format(d) for d in g[c]]
+            elif c == 'ra':
+                data = g[c].to(ra_unit)
             else:
-                data = g[c].value
+                data = g[c]
 
             if c in _cformats:
                 cf = _cformats[c]
@@ -318,6 +326,8 @@ def lightcurve(self, observer, dates, wave, verbose=True, **kwargs):
         if verbose:
             print()
 
+        lc = self._add_lc_columns(lc)
+
         cf = kwargs.get('cformat', dict()).get('fluxd', '{:9.3g}')
         unit = str(unit)
         for i in range(fluxd.shape[1]):
@@ -326,6 +336,17 @@ def lightcurve(self, observer, dates, wave, verbose=True, **kwargs):
                                   format=cf, unit=unit))
         return lc
 
+    def _add_lc_columns(self, lc):
+        """Add additional columns to a lightcurve table.
+
+        Parameters
+        ----------
+        lc : Table
+          The current lightcurve table.
+
+        """
+        return lc
+
     def observe(self, target, date, ltt=False):
         """Distance, phase angle, etc. to another object.
 
diff --git a/mskpy/graphics.py b/mskpy/graphics.py
index b068d6c..2eb6104 100644
--- a/mskpy/graphics.py
+++ b/mskpy/graphics.py
@@ -48,9 +48,10 @@ def arrows(xy, length, rot=0, angles=[0, 90], labels=['N', 'E'],
     length : float
       Length of the arrows in data units.
     rot : float, optional
-      The image orientation (position angle of north).
+      The image orientation (position angle of north), in degrees.
     angles : array, floats, optional
-      The position angles at which to place arrows, measured E of N.
+      The position angles at which to place arrows, measured E of N,
+      in degrees.
     labels : array, strings, optional
       Labels for each arrow, or None for no labels.
     offset : float, optional
@@ -81,12 +82,9 @@ def arrows(xy, length, rot=0, angles=[0, 90], labels=['N', 'E'],
 
     alist = []
     for i in range(len(angles)):
-        ixy = length * inset * np.array(
-            -np.sin(rot + np.radians(angles[i])),
-            np.cos(rot + np.radians(angles[i])))
-        dxy = length * offset * np.array(
-            -np.sin(rot + np.radians(angles[i])),
-            np.cos(rot + np.radians(angles[i])))
+        a = np.radians(rot + angles[i])
+        ixy = length * inset * np.array([-np.sin(a), np.cos(a)])
+        dxy = length * offset * np.array([-np.sin(a), np.cos(a)])
         alist += [ax.annotate(labels[i], xy + ixy, xy + dxy,
                               ha='center', va='center',
                               fontsize=fontsize, arrowprops=arrowprops,
@@ -252,7 +250,7 @@ def nicelegend(*args, **kwargs):
 
 def niceplot(ax=None, axfs='12', lfs='14', tightlayout=True,
-             **kwargs):
+             mew=1.25, lw=2.0, ms=7.0, **kwargs):
     """Pretty up a plot for publication.
 
     Parameters
@@ -283,11 +281,21 @@ def niceplot(ax=None, axfs='12', lfs='14', tightlayout=True,
     plt.setp(labels, fontsize=lfs)
 
     # for plot markers, ticks
-    mew = kwargs.pop('markeredgewidth', kwargs.pop('mew', 1.25))
-    ms = kwargs.pop('markersize', kwargs.pop('ms', 7.0))
-    lw = kwargs.pop('linewidth', kwargs.pop('lw', 2.0))
+    lines = ax.get_lines()
+    mew = kwargs.pop('markeredgewidth', mew)
+    if mew is not None:
+        plt.setp(lines, mew=mew)
 
-    plt.setp(ax.get_lines(), mew=mew, ms=ms, lw=lw, **kwargs)
+    ms = kwargs.pop('markersize', ms)
+    if ms is not None:
+        plt.setp(lines, ms=ms)
+
+    lw = kwargs.pop('linewidth', lw)
+    if lw is not None:
+        plt.setp(lines, lw=lw)
+
+    if len(kwargs) > 0:
+        plt.setp(lines, **kwargs)
 
     lines = ax.xaxis.get_minorticklines() + ax.xaxis.get_majorticklines() + \
         ax.yaxis.get_minorticklines() + ax.yaxis.get_majorticklines()
diff --git a/mskpy/image/analysis.py b/mskpy/image/analysis.py
index 4601ba5..ad83322 100644
--- a/mskpy/image/analysis.py
+++ b/mskpy/image/analysis.py
@@ -9,10 +9,12 @@
 
    anphot
    apphot
+   apphot_by_wcs
    azavg
    bgfit
    bgphot
    centroid
+   find
    fwhm
    gcentroid
    imstat
@@ -35,11 +37,13 @@
 __all__ = [
     'anphot',
     'apphot',
+    'apphot_by_wcs',
     'azavg',
     'bgfit',
     'bgphot',
     'centroid',
     'gcentroid',
+    'find',
     'fwhm',
     'imstat',
     'linecut',
@@ -51,13 +55,11 @@
 class UnableToCenter(Exception):
     pass
 
-def anphot(im, yx, rap, subsample=4):
+def anphot(im, yx, rap, subsample=4, squeeze=True):
     """Simple annular aperture photometry.
 
-    Pixels may be sub-sampled (drizzled).
-
-    `anphot` is not optimized and is best for extended objects in
-    small images.
+    Pixels may be sub-sampled, and sub-sampling may be CPU and memory
+    intensive.
 
     Parameters
     ----------
@@ -66,86 +68,83 @@
       photometry.  For data cubes, the first axis iterates over the
      images.  All images must have the same shape.
     yx : array
-      The `y, x` center of the aperture(s).  Only one center is
-      allowed.  [pixels]
+      The `y, x` center of the aperture(s), or an Nx2 length array of
+      centers.  [pixels]
     rap : float or array
       Aperture radii.  The inner-most aperture will be the annulus 0
       to `min(rap)`.  [pixels]
     subsample : int, optional
       The sub-pixel sampling factor.  Set to `<= 1` for no sampling.
      This will sub-sample the entire image.
+    squeeze : bool, optional
+      Set to `True` to squeeze single length dimensions out of the
+      results.
 
     Returns
     -------
     n : ndarray
-      The number of pixels per annular bin.
+      The number of pixels per annular bin, either shape `(len(rap),)`
+      or `(len(yx), len(rap))`.
     f : ndarray
-      The annular photometry.  If `im` is a set of images of depth
-      `N`, `f` will have shape `N x len(rap)`
+      The annular photometry.  The shape will be one of:
+        `(len(rap),)`
+        `(len(yx), len(rap))`
+        `(len(im), len(yx), len(rap))`
 
     """
 
     _im = np.array(im)
-    ndim = _im.ndim  # later, we will flatten the images
-
-    assert ndim in [2, 3], ("Only images, data cubes, or tuples/lists"
-                            " of images are allowed.")
+    assert _im.ndim in [2, 3], ("Only images, data cubes, or tuples/lists"
+                                " of images are allowed.")
+    if _im.ndim == 2:
+        _im = _im.reshape((1,) + _im.shape)
 
     yx = np.array(yx, float)
-    assert yx.shape == (2,), "yx has incorrect shape."
+    assert yx.ndim in [1, 2], "yx must be one or two dimensional."
+    if yx.ndim == 1:
+        assert yx.shape[0] == 2, "yx must have length 2."
+        yx = yx.reshape((1, 2))
+    else:
+        assert yx.shape[1] == 2, "Second axis of yx must have length 2."
 
     if not np.iterable(rap):
         rap = np.array([rap])
 
     if subsample > 1:
-        if ndim == 3:
-            _im = np.array([core.rebin(x, subsample, flux=True) for x in _im])
-        else:
-            _im = core.rebin(_im, subsample, flux=True)
-
+        _im = np.array([core.rebin(x, subsample, flux=True) for x in _im])
         yx = yx * subsample + (subsample - 1) / 2.0
 
     sz = _im.shape[-2:]
-    r = core.rarray(sz, yx=yx, subsample=10) / float(subsample)
+    # flatten all arrays for digitize
+    N = _im.shape[0]
+    M = _im.shape[1] * _im.shape[2]
+    _im = _im.flatten().reshape((N, M))
+
+    n = np.zeros((len(yx), len(rap)))
+    f = np.zeros((N, len(yx), len(rap)))
 
     # annular photometry via histograms, histogram via digitize() to
-    # save CPU time when mulitiple images are passed
-
-    # flatten all arrays for digitize
-    r = r.flatten()
-    if ndim == 3:
-        N = _im.shape[0]
-        M = _im.shape[1] * _im.shape[2]
-        _im = _im.flatten().reshape((N, M))
-    else:
-        _im = _im.flatten()
-
-    bins = np.digitize(r, rap)
-    n = np.zeros(len(rap))
-    if ndim == 3:
-        f = np.zeros((len(_im), len(rap)))
-        for i in range(len(rap)):
-            j = np.flatnonzero(bins == i)
-            f[:, i] = np.sum(_im[:, j], 1)
-            n[i] = len(j)
-    else:
-        f = np.zeros(len(rap))
-        for i in range(len(rap)):
-            j = np.flatnonzero(bins == i)
-            f[i] = np.sum(_im[j])
-            n[i] = len(j)
+    # save CPU time when multiple images are passed
+    for i in range(len(yx)):
+        r = core.rarray(sz, yx=yx[i], subsample=10) / float(subsample)
+        bins = np.digitize(r.flatten(), rap)
+        for j in range(len(rap)):
+            ap = np.flatnonzero(bins == j)
+            f[:, i, j] = np.sum(_im[:, ap], 1)
+            n[i, j] = len(ap)
 
     n /= float(subsample**2)
-    return n, f
+    if squeeze:
+        return n.squeeze(), f.squeeze()
+    else:
+        return n, f
 
-def apphot(im, yx, rap, subsample=4):
+def apphot(im, yx, rap, subsample=4, **kwargs):
     """Simple aperture photometry.
 
-    Pixels may be sub-sampled (drizzled).
-
-    `apphot` is not optimized and is best for single objects in small
-    images.
+    Pixels may be sub-sampled, and sub-sampling may be CPU and memory
+    intensive.
 
     Parameters
     ----------
@@ -154,26 +153,121 @@
       photometry.  For data cubes, the first axis iterates over the
      images.  All images must have the same shape.
     yx : array
-      The `y, x` center of the aperture(s).  Only one center is
-      allowed.  [pixels]
+      The `y, x` center of the aperture(s), or an Nx2 length array of
+      centers.  [pixels]
     rap : float or array
       Aperture radii.
      [pixels]
     subsample : int, optional
       The sub-pixel sampling factor.  Set to `<= 1` for no sampling.
       This will sub-sample the entire image.
+    **kwargs
+      Any `anphot` keyword argument.
 
     Returns
     -------
     n : ndarray
-      The number of pixels per aperture.
+      The number of pixels per aperture, either shape `(len(rap),)` or
+      `(len(yx), len(rap))`.
     f : ndarray
-      The aperture photometry.  If `im` is a set of images of depth
-      `N`, `f` will have shape `N x len(rap)`.
-
+      The aperture photometry.  The shape will be one of:
+        `(len(rap),)`
+        `(len(yx), len(rap))`
+        `(len(im), len(yx), len(rap))`  (for multiple images)
+
     """
     n, f = anphot(im, yx, rap, subsample=subsample, **kwargs)
     return n.cumsum(-1), f.cumsum(-1)
 
+def apphot_by_wcs(im, coords, wcs, rap, centroid=False,
+                  cfunc=None, ckwargs={}, **kwargs):
+    """Simple aperture photometry, using a world coordinate system.
+
+    The WCS only affects source locations, and does not affect
+    aperture areas.
+
+    Pixels may be sub-sampled, and sub-sampling may be CPU and memory
+    intensive.
+
+    Parameters
+    ----------
+    im : array
+      An image, cube, or tuple of images on which to measure
+      photometry.  For data cubes, the first axis iterates over the
+      images.  All images must have the same shape.
+    coords : astropy SkyCoord
+      The coordinates (e.g., RA, Dec) of the targets.  Only sources
+      interior to the image and at least `2 * max(rap)` from the edges
+      are considered.
+    wcs : astropy WCS
+      The world coordinate system transformation.
+    rap : float or array
+      Aperture radii.  [pixels]
+    centroid : bool, optional
+      Set to `True` to centroid on each source with `cfunc`.  When
+      multiple images are provided, the centroid is based on the first
+      image.
+    cfunc : function, optional
+      The centroiding function to use, or `None` for `gcentroid`.
+    ckwargs: dict
+      Any `cfunc` keyword arguments.
+    **kwargs:
+      Any `apphot` keyword arguments.
+
+    Returns
+    -------
+    yx : ndarray
+      Pixel positions of all sources, `Nx2`.
+    n : ndarray
+      The number of pixels per aperture, either shape `(len(rap),)` or
+      `(len(yx), len(rap))`.
+    f : ndarray
+      The aperture photometry.  The shape will be one of:
+        `(len(rap),)`
+        `(len(yx), len(rap))`
+        `(len(im), len(yx), len(rap))`  (for multiple images)
+
+    """
+
+    squeeze = kwargs.pop('squeeze', True)
+
+    x, y = coords.to_pixel(wcs)
+    yx = np.c_[y, x]
+
+    n = np.zeros((len(yx), np.size(rap)))
+    shape = np.array(im).shape
+    if len(shape) == 3:
+        f = np.zeros((shape[0], ) + n.shape)
+        shape = shape[1:]
+    else:
+        f = np.zeros((1, ) + n.shape)
+
+    max_rap = np.max(rap)
+    sources = np.flatnonzero((x >= 2 * max_rap)
+                             * (x <= (shape[1] - 2 * max_rap))
+                             * (y >= 2 * max_rap)
+                             * (y <= (shape[0] - 2 * max_rap)))
+
+    if centroid:
+        if cfunc is None:
+            cfunc = gcentroid
+
+        for i in sources:
+            try:
+                if np.ndim(im) == 3:
+                    yx[i] = cfunc(im[0], yx[i], **ckwargs)
+                else:
+                    yx[i] = cfunc(im, yx[i], **ckwargs)
+            except UnableToCenter:
+                pass
+
+    _n, _f = apphot(im, yx[sources], rap, squeeze=False, **kwargs)
+    n[sources] = _n
+    f[:, sources] = _f
+
+    if squeeze:
+        return yx, n.squeeze(), f.squeeze()
+    else:
+        return yx, n, f
+
 def azavg(im, yx, raps=None, subsample=4, **kwargs):
     """Create an azimuthally averaged image.
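For orientation, a minimal `apphot_by_wcs` sketch (not part of the
diff; the file name, coordinates, and aperture radii are
hypothetical)::

    from astropy.io import fits
    from astropy.wcs import WCS
    from astropy.coordinates import SkyCoord
    from mskpy.image.analysis import apphot_by_wcs

    im, h = fits.getdata('image.fits', header=True)  # hypothetical file
    coords = SkyCoord(ra=[83.8, 84.0], dec=[-5.4, -5.5], unit='deg')
    # positions, pixel counts, and fluxes for 5- and 10-pixel apertures
    yx, n, f = apphot_by_wcs(im, coords, WCS(h), [5, 10], centroid=True)
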
@@ -222,6 +316,9 @@
     if raps is None:
         maxr = int(r.max()) + 1
         raps = np.logspace(0, np.log2(maxr), int(np.log2(maxr) * 2), base=2)
+    elif isinstance(raps, int):
+        maxr = int(r.max()) + 1
+        raps = np.logspace(0, np.log2(maxr), raps, base=2)
 
     n, f = anphot(im, yx, raps, subsample=subsample)
     n, f, raps = takefrom((n, f, raps), n != 0)
@@ -280,7 +377,7 @@
 
     return bg
 
-def bgphot(im, yx, rap, ufunc=np.mean, **kwargs):
+def bgphot(im, yx, rap, ufunc=np.mean, squeeze=True, **kwargs):
     """Background photometry and error analysis in an annulus.
 
     Pixels are not sub-sampled.  The annulus is processed via
@@ -293,23 +390,24 @@
       photometry.  For data cubes, the first axis iterates over the
      images.  All images must have the same shape.
     yx : array
-      The `y, x` center of the aperture.  Only one center is
-      allowed.  [pixels]
+      The `y, x` center of the aperture, or an `Nx2` array of centers.
     rap : array
       Inner and outer radii of the annulus.  [pixels]
     ufunc : function, optional
       The function with which to determine the background average.
+    squeeze : bool, optional
+      Set to `True` to squeeze single length dimensions out of the
+      results.
     **kwargs :
       Keyword arguments passed to `util.uclip`.
 
     Returns
     -------
     n : ndarray
-      The number of pixels per annular bin.  Same shape comment as for
-      `bg`.
+      The number of pixels for each annulus.
     bg : ndarray
-      The background level in the annulus.  If `im` is a set of images
-      of depth `N`, `bg` will have shape `N x len(rap)`
+      The background level in the annulus, shape `(len(yx),)` or
+      `(len(im), len(yx))`.
     sig : ndarray
       The standard deviation of the background pixels.  Same shape
       comment as for `bg`.
@@ -319,36 +417,40 @@
     from ..util import uclip
 
     _im = np.array(im)
-    ndim = _im.ndim  # later, we will flatten the images
-
-    assert ndim in [2, 3], ("Only images, data cubes, or tuples/lists"
-                            " of images are allowed.")
+    assert _im.ndim in [2, 3], ("Only images, data cubes, or tuples/lists"
+                                " of images are allowed.")
+    if _im.ndim == 2:
+        _im = _im.reshape((1,) + _im.shape)
 
     yx = np.array(yx, float)
-    assert yx.shape == (2,), "yx has incorrect shape."
+    assert yx.ndim in [1, 2], "yx must be one or two dimensional."
+    if yx.ndim == 1:
+        assert yx.shape[0] == 2, "yx must have length 2."
+        yx = yx.reshape((1, 2))
+
+    assert yx.shape[1] == 2, "Second axis of yx must have length 2."
 
     rap = np.array(rap, float)
     assert rap.shape == (2,), "rap has incorrect shape."
     sz = _im.shape[-2:]
 
-    r = core.rarray(sz, yx=yx, subsample=4)
-    annulus = (r >= rap.min()) * (r <= rap.max())
+    n = np.zeros(len(yx))
+    bg, sig = np.zeros((2, len(_im), len(yx)))
 
-    if ndim == 3:
-        n, bg, sig = np.zeros((3, _im.shape[0]))
-        for i in range(len(m)):
-            f = _im[i][annulus]
-            bg[i], j, niter = uclip(f, ufunc, full_output=True, **kwargs)
-            n[i] = len(j)
-            sig[i] = np.std(f[j])
-    else:
-        f = _im[annulus]
-        bg, j, niter = uclip(f, ufunc, full_output=True, **kwargs)
-        n = len(j)
-        sig = np.std(f[j])
+    for i in range(len(yx)):
+        r = core.rarray(sz, yx=yx[i], subsample=10)
+        annulus = (r >= rap.min()) * (r <= rap.max())
+        for j in range(len(_im)):
+            f = _im[j][annulus]
+            bg[j, i], k, niter = uclip(f, ufunc, full_output=True, **kwargs)
+            n[i] = len(k)
+            sig[j, i] = np.std(f[k])
 
-    return n, bg, sig
+    if squeeze:
+        return n.squeeze(), bg.squeeze(), sig.squeeze()
+    else:
+        return n, bg, sig
 
 def centroid(im, yx=None, box=None, niter=1, shrink=True, silent=True):
     """Centroid (center of mass) of an image.
@@ -420,6 +522,99 @@
 
     return cyx
 
+def find(im, sigma=None, thresh=2, centroid=None, fwhm=2, **kwargs):
+    """Find sources in an image.
+
+    Generally designed for point-ish sources.
+
+    Parameters
+    ----------
+    im : array
+      The image to search.
+    sigma : float, optional
+      The 1-sigma uncertainty in the background, or `None` to estimate
+      the uncertainty with the sigma-clipped mean and standard
+      deviation of `meanclip`.  If provided, then the image should be
+      background subtracted.
+    thresh : float, optional
+      The detection threshold in sigma.  If a pixel is detected above
+      `sigma * thresh`, it is an initial source candidate.
+    centroid : function, optional
+      The centroiding function, or `None` to use `gcentroid`.  The
+      function is passed a subsection of the image.  All other
+      parameters are provided via `kwargs`.
+    fwhm : int, optional
+      A rough estimate of the FWHM of a source, used for binary
+      morphology operations.
+    **kwargs
+      Any keyword arguments for `centroid`.
+
+    Returns
+    -------
+    cat : ndarray
+      A catalog of `(y, x)` source positions.
+    f : ndarray
+      An array of approximate source fluxes (a background estimate is
+      removed if `sigma` is `None`).
+
+    """
+
+    import scipy.ndimage as nd
+    from ..util import meanclip
+
+    assert isinstance(fwhm, int), 'FWHM must be integer'
+
+    _im = im.copy()
+
+    if sigma is None:
+        stats = meanclip(_im, full_output=True)[:2]
+        _im -= stats[0]
+        sigma = stats[1]
+
+    if centroid is None:
+        centroid = gcentroid
+
+    det = _im > thresh * sigma
+    det = nd.binary_erosion(det, iterations=fwhm)  # remove small objects
+    det = nd.binary_dilation(det, iterations=fwhm * 2 + 1)  # grow aperture size
+    label, n = nd.label(det)
+
+    yx = []
+    f = []
+    bad = 0
+    for i in nd.find_objects(label):
+        star = _im[i]
+
+        if not np.isfinite(star.sum()):
+            bad += 1
+            continue
+
+        try:
+            cen = centroid(star, **kwargs)
+        except Exception:
+            bad += 1
+            continue
+
+        if any(np.array(cen) < -0.5):
+            bad += 1
+            continue
+
+        if any((cen[0] >= star.shape[0] - 0.5, cen[1] >= star.shape[1] - 0.5)):
+            bad += 1
+            continue
+
+        cen += np.array((i[0].start, i[1].start))
+
+        if not all(np.isfinite(cen)):
+            bad += 1
+            continue
+
+        yx.append(cen)
+        f.append(star.sum())
+
+    print('[find] {} good, {} bad sources'.format(len(yx), bad))
+    return np.array(yx), np.array(f)
+
 def fwhm(im, yx, bg=True, **kwargs):
     """Compute the FWHM of an image.
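A short sketch pairing the new `find` with `catalogs.brightest` (not
part of the diff; the image is synthetic and the threshold is
illustrative)::

    import numpy as np
    from mskpy.image.analysis import find
    from mskpy.catalogs import brightest

    im = np.random.normal(0.0, 1.0, (512, 512))  # background-only example
    cat, flux = find(im, thresh=3)                # (y, x) positions, fluxes
    if len(cat) > 0:
        top = brightest(cat.T, flux, 10)          # catalogs expect 2xN input
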
@@ -496,7 +691,7 @@
     from ..util import gaussian
 
     if yx is None:
-        yx = np.unravel_index(np.nanargmax(im), im.shape)
+        yx = np.array(np.unravel_index(np.nanargmax(im), im.shape), float)
 
     # the array index location of yx
     iyx = np.round(yx).astype(int)
@@ -518,8 +713,10 @@ def gfit(p, x, f):
         return np.sum((amplitude * gaussian(x, mu, sigma) - f)[i]**2)
 
     if halfbox[0] > 0:
-        yr = [iyx[0] - halfbox[0], iyx[0] + halfbox[0] + 1]
-        xr = [iyx[1] - halfbox[1], iyx[1] + halfbox[1] + 1]
+        yr = [max(iyx[0] - halfbox[0], 0),
+              min(iyx[0] + halfbox[0] + 1, im.shape[0] - 1)]
+        xr = [max(iyx[1] - halfbox[1], 0),
+              min(iyx[1] + halfbox[1] + 1, im.shape[1] - 1)]
         ap = (slice(*yr), slice(*xr))
         y = np.arange(*yr, dtype=float)
         f = np.sum(im[ap], 1)
@@ -532,8 +729,10 @@
         cyx[0] = fit['x'][1]
 
     if halfbox[1] > 0:
-        yr = [iyx[0] - halfbox[1], iyx[0] + halfbox[1] + 1]
-        xr = [iyx[1] - halfbox[0], iyx[1] + halfbox[0] + 1]
+        yr = [max(iyx[0] - halfbox[1], 0),
+              min(iyx[0] + halfbox[1] + 1, im.shape[0] - 1)]
+        xr = [max(iyx[1] - halfbox[0], 0),
+              min(iyx[1] + halfbox[0] + 1, im.shape[1] - 1)]
         ap = (slice(*yr), slice(*xr))
         x = np.arange(*xr)
         f = np.sum(im[ap], 0)
diff --git a/mskpy/image/core.py b/mskpy/image/core.py
index 1d31b8f..1626ab0 100644
--- a/mskpy/image/core.py
+++ b/mskpy/image/core.py
@@ -32,7 +32,7 @@
 import numpy as np
 
 def imshift(im, yx, subsample=4):
-    """Shift an image, allowing for sub-pixel offsets (drizzle method).
+    """Shift an image, allowing for sub-pixel offsets.
 
     Parameters
     ----------
@@ -42,7 +42,8 @@
       `y, x` offsets.  Positive values move pixels to the up/right.
       [unsampled pixels]
     subsample : int, optional
-      The sub-sampling factor.
+      The sub-sampling factor.  If <= 1, then the image is only
+      shifted whole pixels.
 
     Returns
     -------
@@ -52,7 +53,7 @@
     """
 
     if subsample <= 1:
-        raise ValueError("sample must be > 1.")
+        subsample = 1
 
     sy = int(round(yx[0] * subsample))  # whole sampled pixels
     sx = int(round(yx[1] * subsample))
@@ -147,6 +148,10 @@ def magni(a, factor):
             b /= float(factor)
         return b
 
+    if factor == 1:
+        # done!
+        return a
+
     _a = a.copy()
     if factor < 0:
         for i in range(_a.ndim):
diff --git a/mskpy/image/process.py b/mskpy/image/process.py
index 78f795d..80bdd65 100644
--- a/mskpy/image/process.py
+++ b/mskpy/image/process.py
@@ -7,6 +7,8 @@
 .. autosummary::
    :toctree: generated/
 
+   align_by_centroid
+   align_by_wcs
    columnpull
    combine
    crclean
@@ -22,6 +24,8 @@
 from . import core, analysis
 
 __all__ = [
+    'align_by_centroid',
+    'align_by_wcs',
     'columnpull',
     'combine',
     'crclean',
@@ -31,6 +35,138 @@
     'stripes'
 ]
 
+def align_by_centroid(data, yx, cfunc=None, ckwargs=dict(box=5),
+                      **kwargs):
+    """Align a set of images by centroid of a single source.
+
+    Parameters
+    ----------
+    data : list or array
+      The list of FITS files, or stack of images to align.  If the
+      first element is a string, then a file list is assumed.
+    yx : array
+      The approximate (y, x) coordinate of the source.
+    cfunc : function, optional
+      The centroiding function or `None` to use `gcentroid`.
+    ckwargs : dict
+      Keyword arguments for `cfunc`.
+    **kwargs
+      Keyword arguments for `imshift`.
+
+    Returns
+    -------
+    stack : ndarray
+      The aligned images.
+    dyx : ndarray
+      The offsets.
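+
+    Examples
+    --------
+    A minimal sketch, assuming a list of FITS files and an approximate
+    (y, x) source position (file names hypothetical):
+
+    >>> stack, dyx = align_by_centroid(['im1.fits', 'im2.fits'],
+    ...                                (512.0, 512.0))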
+
+    """
+
+    from astropy.io import fits
+    from .analysis import gcentroid
+
+    if cfunc is None:
+        cfunc = gcentroid
+
+    if isinstance(data[0], str):
+        im = fits.getdata(data[0])
+        stack = np.zeros((len(data), ) + im.shape)
+        stack[0] = im
+        del im
+        for i in range(1, len(data)):
+            stack[i] = fits.getdata(data[i])
+    else:
+        stack = data.copy()
+
+    y0, x0 = cfunc(stack[0], yx, **ckwargs)
+
+    dyx = np.zeros((len(stack), 2))
+    for i in range(1, len(stack)):
+        y, x = cfunc(stack[i], yx, **ckwargs)
+        dyx[i] = y0 - y, x0 - x
+        stack[i] = core.imshift(stack[i], dyx[i], **kwargs)
+        if int(dyx[i, 0]) < 0:
+            stack[i, int(dyx[i, 0]):] = np.nan
+        elif int(dyx[i, 0]) > 0:
+            stack[i, :int(dyx[i, 0])] = np.nan
+        if int(dyx[i, 1]) < 0:
+            stack[i, :, int(dyx[i, 1]):] = np.nan
+        elif int(dyx[i, 1]) > 0:
+            stack[i, :, :int(dyx[i, 1])] = np.nan
+
+    return stack, dyx
+
+def align_by_wcs(files, target=None, observer=None, time_key='DATE-OBS',
+                 **kwargs):
+    """Align a set of images using their world coordinate systems.
+
+    Parameters
+    ----------
+    files : list
+      The list of FITS files to align.
+    target : SolarSysObject
+      Align in the reference frame of this object.
+    observer : SolarSysObject
+      Observe `target` with this observer.
+    time_key : string
+      The header keyword for the observation time.
+    **kwargs
+      Keyword arguments for `imshift`.
+
+    Returns
+    -------
+    stack : ndarray
+      The aligned images.
+    dyx : ndarray
+      The offsets.
+
+    """
+
+    import astropy.units as u
+    from astropy.io import fits
+    from astropy.wcs import WCS
+    from astropy.coordinates import Angle
+
+    im, h0 = fits.getdata(files[0], header=True)
+    wcs0 = WCS(h0)
+    stack = np.zeros((len(files), ) + im.shape)
+    stack[0] = im
+
+    y0, x0 = np.array(im.shape) / 2.0
+    if target is not None:
+        assert observer is not None, "observer required"
+        g0 = observer.observe(target, h0[time_key])
+        xt, yt = wcs0.wcs_world2pix(np.c_[g0.ra, g0.dec], 0)[0]
+
+    ra0, dec0 = Angle(wcs0.wcs_pix2world(np.c_[x0, y0], 0)[0] * u.deg)
+    dra = 0 * u.deg
+    ddec = 0 * u.deg
+
+    dyx = np.zeros((len(files), 2))
+    for i in range(1, len(files)):
+        im, h = fits.getdata(files[i], header=True)
+        wcs = WCS(h)
+        if target is not None:
+            g = observer.observe(target, h[time_key])
+            dra = g.ra - g0.ra
+            ddec = g.dec - g0.dec
+
+        x, y = wcs.wcs_world2pix(np.c_[ra0 + dra, dec0 + ddec], 0)[0]
+        dyx[i] = y0 - y, x0 - x
+        stack[i] = core.imshift(im, dyx[i], **kwargs)
+        if int(dyx[i, 0]) != 0:
+            if int(dyx[i, 0]) < 0:
+                stack[i, int(dyx[i, 0]):] = np.nan
+            else:
+                stack[i, :int(dyx[i, 0])] = np.nan
+        if int(dyx[i, 1]) != 0:
+            if int(dyx[i, 1]) < 0:
+                stack[i, :, int(dyx[i, 1]):] = np.nan
+            else:
+                stack[i, :, :int(dyx[i, 1])] = np.nan
+
+    return stack, dyx
+
 def columnpull(column, index, bg, stdev):
     """Define a column pull detector artifact.
 
@@ -192,10 +329,10 @@ def crclean(im, thresh, niter=1, unc=None, gain=1.0, rn=0.0, fwhm=2.0):
 
     return clean
 
-def fixpix(im, mask):
+def fixpix(im, mask, max_area=10):
     """Replace masked values with a linear interpolation.
 
-    Probably only good for isolated badpixels.
+    Probably only good for isolated bad pixels.
 
     Parameters
     ----------
@@ -203,6 +340,8 @@
       The image.
     mask : array
       `True` where `im` contains bad pixels.
+    max_area : int
+      Only fix areas smaller than or equal to this value.
     Returns
     -------
@@ -211,7 +350,7 @@
 
     """
 
     from scipy.interpolate import interp2d
-    from scipy.ndimage import binary_dilation, label
+    from scipy.ndimage import binary_dilation, label, find_objects
 
     # create domains around masked pixels
     dilated = binary_dilation(mask)
@@ -219,20 +358,15 @@
 
     # loop through each domain, replace bad pixels with the average
     # from nearest neighbors
-    x = core.xarray(im.shape)
-    y = core.yarray(im.shape)
     cleaned = im.copy()
-    for d in (np.arange(n) + 1):
-        i = (domains == d)  # find the current domain
-
-        # extract the sub-image
-        x0, x1 = x[i].min(), x[i].max() + 1
-        y0, y1 = y[i].min(), y[i].max() + 1
-        subim = im[y0:y1, x0:x1]
-        submask = mask[y0:y1, x0:x1]
-        subgood = (submask == False)
-
-        cleaned[i * mask] = subim[subgood].mean()
+    for i in find_objects(domains):
+        submask = mask[i]
+        if submask.sum() > max_area:
+            continue
+        subim = im[i].copy()
+        subgood = (submask == False) * dilated[i]
+        subim[submask] = subim[subgood].mean()
+        cleaned[i] = subim
 
     return cleaned
 
diff --git a/mskpy/models/surfaces.py b/mskpy/models/surfaces.py
index 3d9fcf0..be4bcad 100644
--- a/mskpy/models/surfaces.py
+++ b/mskpy/models/surfaces.py
@@ -65,9 +65,9 @@ class NEATM(SurfaceRadiation):
     If you use this model, please reference Harris (1998, Icarus, 131,
     291-301).
 
-    If unknown, use `eta=1.0` `epsilon=0.95` for a comet (Fernandez et
-    al., Icarus, submitted), and `eta=0.96` `epsilon=0.9` (or
-    `eta=0.91` with `epsilon=0.95`) for an asteroid (Mainzer et
+    If unknown, use `eta=1.03` `epsilon=0.95` for a comet (Fernandez
+    et al., 2013, Icarus 226, 1138-1170), and `eta=0.96` `epsilon=0.9`
+    (or `eta=0.91` with `epsilon=0.95`) for an asteroid (Mainzer et
     al. 2011, ApJ 736, 100).
 
     Parameters
@@ -213,6 +213,67 @@ def T0(self, rh):
                   / sigma)**0.25
         return T0 * u.K
 
+    def fit(self, g, wave, fluxd, unc, **kwargs):
+        """Least-squares fit to a spectrum, varying `D` and `eta`.
+
+        Uses the object's current state as the initial parameter set.
+
+        Parameters
+        ----------
+        g : dict-like
+          A dictionary-like object with the keys 'rh' (heliocentric
+          distance), 'delta' (observer-target distance), and 'phase'
+          (phase angle) as Quantities.
+        wave : Quantity
+          The spectrum wavelengths.
+        fluxd : Quantity
+          The spectrum flux density.
+        unc : Quantity
+          The uncertainties on `fluxd`.
+        **kwargs
+          Any keyword arguments for `scipy.optimize.leastsq`.
+
+        Returns
+        -------
+        fit : NEATM
+          Best-fit parameters.
+        fiterr : tuple
+          `(D, eta)` fit errors (assuming independent variables) or
+          `None` if they cannot be computed.
+        result : tuple
+          The full output from `scipy.optimize.leastsq`.
+
+        """
+
+        from copy import copy
+        from scipy.optimize import leastsq
+
+        def chi(p, neatm, g, wave, fluxd, unc):
+            neatm.D = u.Quantity(abs(p[0]), u.km)
+            neatm.eta = abs(p[1])
+            model = neatm.fluxd(g, wave, unit=fluxd.unit).value
+            chi = (model - fluxd.value) / unc.value
+            rchisq = (chi**2).sum() / (len(wave) - 2.0)
+            print(neatm.D, neatm.eta, rchisq)
+            return chi
+
+        neatm = copy(self)
+        args = (neatm, g, wave, fluxd, unc)
+        kwargs['epsfcn'] = kwargs.get('epsfcn', 1e-5)
+        kwargs['full_output'] = True
+        result = leastsq(chi, (self.D.value, self.eta), args, **kwargs)
+
+        neatm.D = u.Quantity(result[0][0], u.km)
+        neatm.eta = result[0][1]
+        cov = result[1]
+        if cov is None:
+            err = None
+        else:
+            err = np.sqrt(np.diagonal(cov))
+
+        return neatm, err, result
+
     def _point_emission(self, phi, theta, wave, T0):
         """The emission from a single point.
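For context, a hedged `NEATM.fit` sketch with made-up numbers, assuming
the `NEATM(D, Ap, eta=...)` constructor (diameter, geometric albedo;
not shown in this diff)::

    import astropy.units as u
    from mskpy.models import NEATM

    # observing circumstances and a synthetic mid-IR spectrum
    g = dict(rh=1.5 * u.au, delta=1.0 * u.au, phase=30 * u.deg)
    wave = [8, 12, 18, 24] * u.um
    fluxd = [0.5, 1.6, 1.9, 1.7] * 1e-15 * u.Unit('W/(m2 um)')
    unc = 0.1 * fluxd

    model = NEATM(1.0 * u.km, 0.04, eta=1.03)  # assumed signature
    best, err, result = model.fit(g, wave, fluxd, unc)
    print(best.D, best.eta)
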
diff --git a/mskpy/observing/__init__.py b/mskpy/observing/__init__.py
index 014a414..1a20bd6 100644
--- a/mskpy/observing/__init__.py
+++ b/mskpy/observing/__init__.py
@@ -12,6 +12,7 @@
    ---------
    am_plot
    file2targets
+   plot_transit_time
 
 """
 
 from __future__ import print_function
@@ -27,7 +28,8 @@
     'Target',
 
     'am_plot',
-    'file2targets'
+    'file2targets',
+    'plot_transit_time'
 ]
 
 class Target(object):
@@ -78,6 +80,7 @@ class Observer(object):
     -------
     airmass
     altaz
+    finding_chart
     lst
     lst0
     plot_am
@@ -125,11 +128,11 @@ def lst0(self):
 
     def __repr__(self):
         if self.name is not None:
-            return '<Observer ({}): {:.2f} deg, {:.2f} deg>'.format(
-                self.name, self.lon.degree, self.lat.degree)
+            return '<Observer ({}): {:.2f} deg, {:.2f} deg, {}>'.format(
+                self.name, self.lon.degree, self.lat.degree,
+                self.date.iso[:10])
         else:
-            return '<Observer: {:.2f} deg, {:.2f} deg>'.format(
-                self.lon.degree, self.lat.degree)
+            return '<Observer: {:.2f} deg, {:.2f} deg, {}>'.format(
+                self.lon.degree, self.lat.degree, self.date.iso[:10])
 
     def _radec(self, target, date):
 
@@ -186,6 +189,98 @@ def altaz(self, target):
                             self.lat.degree)
         return Angle(alt, unit=u.deg), Angle(az, unit=u.deg)
 
+    def finding_chart(self, target, ds9, trange=[-6, 6] * u.hr,
+                      ticks=1 * u.hr, fov=1 * u.arcmin, frame=1, dss=True):
+        """Plot a DS9 finding chart for a moving target.
+
+        Parameters
+        ----------
+        target : SolarSysObject
+          The target to observe.
+        ds9 : pysao.ds9
+          Plot to this DS9 instance.
+        trange : Quantity, optional
+          Plot the target's path over this time span, centered on the
+          observer's date (`self.date`).
+        ticks : Quantity, optional
+          Plot tick marks using this interval.  The first tick is a
+          circle.
+        fov : Quantity, optional
+          Angular size of a box or rectangle to draw, indicating your
+          instrument's FOV, or `None`.
+        frame : int, optional
+          DS9 frame number to display image.
+        dss : bool, optional
+          Set to `True` to retrieve a DSS image.
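+
+        Examples
+        --------
+        A sketch, assuming `obs` is this `Observer`, `comet` is a
+        `SolarSysObject`, and pysao is available (names hypothetical):
+
+        >>> import pysao
+        >>> ds9 = pysao.ds9()
+        >>> obs.finding_chart(comet, ds9, fov=2 * u.arcmin)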
+
+        """
+
+        import pysao
+        from ..ephem import Earth, SolarSysObject
+
+        assert isinstance(target, SolarSysObject), "target must be a SolarSysObject"
+        trange = u.Quantity(trange, u.hr)
+        ticks = u.Quantity(ticks, u.hr)
+        ds9 = ds9 if ds9 is not None else pysao.ds9()
+
+        # DSS
+        g = Earth.observe(target, self.date, ltt=True)
+        ds9.set('frame {}'.format(frame))
+        ds9.set('dsssao frame current')
+        ds9.set('dsssao size 60 60')
+        ds9.set('dsssao coord {} {}'.format(
+            g['ra'].to_string(u.hr, sep=':'),
+            g['dec'].to_string(u.deg, sep=':')))
+        ds9.set('dsssao close')
+        ds9.set('cmap b')
+        ds9.set('align')
+
+        # FOV
+        if fov is not None:
+            if fov.size == 1:
+                fov = [fov, fov]
+            fov_deg = u.Quantity(fov, u.deg).value
+            reg = 'fk5; box {} {} {} {} 0'.format(
+                g['ra'].to_string(u.hr, sep=':'),
+                g['dec'].to_string(u.deg, sep=':'),
+                fov_deg[0], fov_deg[1])
+            ds9.set('regions', reg)
+
+        # path
+        dt = np.linspace(trange[0], trange[1], 31)
+        g = Earth.observe(target, self.date + dt, ltt=True)
+        for i in range(len(g) - 1):
+            ds9.set('regions', 'fk5; line {} {} {} {}'.format(
+                g[i]['ra'].to_string(u.hr, sep=':'),
+                g[i]['dec'].to_string(u.deg, sep=':'),
+                g[i+1]['ra'].to_string(u.hr, sep=':'),
+                g[i+1]['dec'].to_string(u.deg, sep=':')))
+
+        # ticks
+        dt1 = np.arange(0, trange[0].value, -ticks.value)
+        if dt1[-1] != trange[0].value:
+            dt1 = np.concatenate((dt1, [trange[0].value]))
+        dt2 = np.arange(ticks.value, trange[1].value, ticks.value)
+        if dt2[-1] != trange[1].value:
+            dt2 = np.concatenate((dt2, [trange[1].value]))
+        dt = np.concatenate((dt1[::-1], dt2)) * u.hr
+        del dt1, dt2
+        g = Earth.observe(target, self.date + dt, ltt=True)
+        for i in range(len(g)):
+            s = 'fk5; point({},{}) # point=cross'.format(
+                g[i]['ra'].to_string(u.hr, sep=':'),
+                g[i]['dec'].to_string(u.deg, sep=':'))
+            if i == 0:
+                s = s.replace('cross', 'circle')
+            ds9.set('regions', s)
+
+        g = Earth.observe(target, self.date, ltt=True)
+        ds9.set('regions', 'fk5; point({},{}) # point=x'.format(
+            g['ra'].to_string(u.hr, sep=':'),
+            g['dec'].to_string(u.deg, sep=':')))
+
+        return ds9
+
     def plot_am(self, target, N=100, ax=None, **kwargs):
         """Plot the airmass of this target, centered on the current
         date.
@@ -442,6 +537,60 @@ def file2targets(filename):
         print("Skipped {} possible targets".format(skipped))
     return targets
 
+def plot_transit_time(target, g_sun, observer=None, ax=None, **kwargs):
+    """Plot the transit time of a target.
+
+    Parameters
+    ----------
+    target : SolarSysObject
+    g_sun : Geom
+      The Sun's observing geometry for the dates of interest.
+    observer : SolarSysObject
+      The observer, or `None` for Earth.
+    ax : matplotlib axis, optional
+      The axis to which to plot, or `None` for current.
+    **kwargs
+      Any plot keyword.
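+
+    Examples
+    --------
+    A sketch, assuming `comet` is a `SolarSysObject` and `dates`
+    covers the period of interest (names hypothetical):
+
+    >>> from mskpy.ephem import Earth, Sun
+    >>> g_sun = Earth.observe(Sun, dates)
+    >>> plot_transit_time(comet, g_sun)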
+
+    """
+
+    import matplotlib.pyplot as plt
+    from ..ephem import Earth
+
+    if observer is None:
+        observer = Earth
+
+    if ax is None:
+        ax = plt.gca()
+
+    g = observer.observe(target, g_sun.date)
+    tt = (g.ra - g_sun.ra - 12 * u.hourangle).wrap_at(180 * u.deg).hourangle
+
+    cut = np.concatenate((np.abs(np.diff(tt)) > 12, [True]))
+    i = 0
+    name = target.name
+    for j in np.flatnonzero(cut):
+        line = ax.plot(tt[i:j + 1], g.date[i:j + 1].datetime, label=name,
+                       **kwargs)[0]
+        name = None
+        i = j + 1
+
+    # mark perihelion
+    i = g.rh.argmin()
+    if (i > 0) and (i < (len(g) - 1)):
+        ax.plot([tt[i]], [g.date[i].datetime], '.', color=line.get_color())
+        ax.annotate(' q={:.1f}'.format(g.rh[i].value),
+                    [tt[i], g.date[i].datetime], color=line.get_color(),
+                    fontsize=8)
+
+    # pick 12 points and plot rh
+    x = np.random.randint(0, len(g) / 10)
+    for i in np.linspace(0 + x, len(g) - x - 1, 12).astype(int):
+        ax.plot([tt[i]], [g.date[i].datetime], '.', color=line.get_color())
+        ax.annotate(' {:.1f}'.format(g.rh[i].value),
+                    [tt[i], g.date[i].datetime], color=line.get_color(),
+                    fontsize=8)
+
 #class MovingObserver(Observer):
 #    def __init__(self, obj):
 #        self.observer = obj
diff --git a/mskpy/photometry/__init__.py b/mskpy/photometry/__init__.py
new file mode 100644
index 0000000..afde5ab
--- /dev/null
+++ b/mskpy/photometry/__init__.py
@@ -0,0 +1,39 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+photometry --- Tools for working with photometry.
+=================================================
+
+.. autosummary::
+   :toctree: generated/
+
+   Modules
+   -------
+   hb - Hale-Bopp filter set calibration.
+
+   Functions
+   ---------
+   airmass_app
+   airmass_loc
+   cal_airmass
+   cal_color_airmass
+
+"""
+
+import numpy as np
+import astropy.units as u
+
+from .core import *
+from . import hb
+
+__all__ = [
+    'airmass_app',
+    'airmass_loc',
+    'cal_airmass',
+    'cal_color_airmass',
+    'hb'
+]
+
+# update module docstring
+from ..util import autodoc
+autodoc(globals())
+del autodoc
diff --git a/mskpy/photometry/core.py b/mskpy/photometry/core.py
new file mode 100644
index 0000000..73c238a
--- /dev/null
+++ b/mskpy/photometry/core.py
@@ -0,0 +1,173 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+core --- Core code for photometry.
+==================================
+
+.. autosummary::
+   :toctree: generated/
+
+   Functions
+   ---------
+   airmass_app
+   airmass_loc
+   cal_airmass
+   cal_color_airmass
+
+"""
+
+import numpy as np
+import astropy.units as u
+
+__all__ = [
+    'airmass_app',
+    'airmass_loc',
+    'cal_airmass',
+    'cal_color_airmass',
+]
+
+def airmass_app(z_true, h):
+    """Apparent airmass.
+
+    Hardie's 1962 formula, with a correction for atmospheric
+    refraction.  Used by Farnham and Schleicher 2000.
+
+    For OH, use `airmass_loc`.
+
+    Parameters
+    ----------
+    z_true : Angle or Quantity
+      The object's true zenith angle.
+    h : Quantity
+      The observer's elevation.
+
+    """
+
+    tan = np.tan(z_true)
+    exp = np.exp(-h.to(u.km).value / 8.0)
+    z_app = z_true - (60.4 * tan - 0.0668 * tan**3) / 3600. * exp * u.deg
+    sec = 1.0 / np.cos(z_app)
+    X = (sec - 0.0018167 * (sec - 1) - 0.002875 * (sec - 1)**2
+         - 0.0008083 * (sec - 1)**3)
+    return X.value
+
+def airmass_loc(z_true):
+    """Airmass based on local zenith angle.
+
+    Use for OH extinction.
+
+    Parameters
+    ----------
+    z_true : Angle or Quantity
+      The object's true zenith angle.
+
+    """
+
+    R = 6378.
+    H = 22.
+    X = (R + H) / np.sqrt((R + H)**2 - (R * np.sin(z_true))**2)
+    return X.value
+
+def cal_airmass(m, munc, M, X, guess=(25., -0.1),
+                covar=False):
+    """Calibration coefficients, based on airmass.
+
+    Parameters
+    ----------
+    m : array
+        Instrumental (uncalibrated) magnitude.
+    munc : array
+        Uncertainties on m.
+    M : array
+        Calibrated magnitude.
+    X : array
+        Airmass.
+    guess : array, optional
+        An initial guess for the fitting algorithm.
+    covar : bool, optional
+        Set to `True` to return the covariance matrix.
+
+    Returns
+    -------
+    A : ndarray
+        The photometric zero point, and airmass correction slope. [mag,
+        mag/airmass]
+    unc or cov : ndarray
+        Uncertainties on each parameter, based on least-squares fitting,
+        or the covariance matrix, if `covar` is `True`.
+
+    """
+
+    from scipy.optimize import leastsq
+
+    def chi(A, m, munc, M, X):
+        model = M - A[0] + A[1] * X
+        chi = (np.array(m) - model) / np.array(munc)
+        return chi
+
+    output = leastsq(chi, guess, args=(m, munc, M, X),
+                     full_output=True, epsfcn=1e-3)
+    fit = output[0]
+    cov = output[1]
+    err = np.sqrt(np.diag(cov))
+
+    if covar:
+        return fit, cov
+    else:
+        return fit, err
+
+def cal_color_airmass(m, munc, M, color, X, guess=(25., -0.1, -0.01),
+                      covar=False):
+    """Calibration coefficients, based on airmass and color index.
+
+    Parameters
+    ----------
+    m : array
+        Instrumental (uncalibrated) magnitude.
+    munc : array
+        Uncertainties on m.
+    M : array
+        Calibrated magnitude.
+    color : array
+        Calibrated color index, e.g., V - R.
+    X : array
+        Airmass.
+    guess : array, optional
+        An initial guess for the fitting algorithm.
+    covar : bool, optional
+        Set to `True` to return the covariance matrix.
+
+    Returns
+    -------
+    A : ndarray
+        The photometric zero point, airmass correction slope, and color
+        correction slope. [mag, mag/airmass, mag/color index]
+    unc or cov : ndarray
+        Uncertainties on each parameter, based on least-squares fitting,
+        or the covariance matrix, if `covar` is `True`.
+
+    """
+
+    from scipy.optimize import leastsq
+
+    def chi(A, m, munc, M, color, X):
+        model = M - A[0] + A[1] * X + A[2] * color
+        chi = (np.array(m) - model) / np.array(munc)
+        return chi
+
+    output = leastsq(chi, guess, args=(m, munc, M, color, X),
+                     full_output=True, epsfcn=1e-3)
+    fit = output[0]
+    cov = output[1]
+    err = np.sqrt(np.diag(cov))
+
+    if covar:
+        return fit, cov
+    else:
+        return fit, err
+
+
+# update module docstring
+from ..util import autodoc
+autodoc(globals())
+del autodoc
+
diff --git a/mskpy/photometry/hb.py b/mskpy/photometry/hb.py
new file mode 100644
index 0000000..9a93655
--- /dev/null
+++ b/mskpy/photometry/hb.py
@@ -0,0 +1,619 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+hb --- Hale-Bopp filter set photometric calibration
+===================================================
+
+.. autosummary::
+   :toctree: generated/
+
+   cal_oh
+   continuum_color
+   ext_aerosol_bc
+   ext_aerosol_oh
+   ext_total_oh
+   ext_ozone_oh
+   ext_rayleigh_oh
+   flux_gas
+   flux_oh
+   fluxd_continuum
+
+todo: Need more uncertainty propagations.
+
+"""
+
+import numpy as np
+import astropy.units as u
+
+from .core import *
+
+__all__ = [
+    'cal_oh',
+    'continuum_color',
+    'ext_aerosol_bc',
+    'ext_aerosol_oh',
+    'ext_total_oh',
+    'ext_ozone_oh',
+    'ext_rayleigh_oh',
+    'flux_gas',
+    'flux_oh',
+    'fluxd_continuum',
+]
+
+# Table I of Farnham et al.
2000
+filters = set(['OH', 'NH', 'CN', 'C3', 'CO+', 'C2', 'H2O+', 'UC',
+               'BC', 'GC', 'RC'])
+
+cw = {  # center wavelengths
+    'OH': u.Quantity(0.3097, 'um'),
+    'NH': u.Quantity(0.3361, 'um'),
+    'UC': u.Quantity(0.3449, 'um'),
+    'CN': u.Quantity(0.3869, 'um'),
+    'C3': u.Quantity(0.4063, 'um'),
+    'CO+': u.Quantity(0.4266, 'um'),
+    'BC': u.Quantity(0.4453, 'um'),
+    'C2': u.Quantity(0.5135, 'um'),
+    'GC': u.Quantity(0.5259, 'um'),
+    'H2O+': u.Quantity(0.7028, 'um'),
+    'RC': u.Quantity(0.7133, 'um'),
+    'R': u.Quantity(0.641, 'um')  # Bessell 1998
+}
+
+cw_50 = {  # 50% power width
+    'OH': u.Quantity( 58, 'AA'),
+    'NH': u.Quantity( 54, 'AA'),
+    'UC': u.Quantity( 79, 'AA'),
+    'CN': u.Quantity( 56, 'AA'),
+    'C3': u.Quantity( 58, 'AA'),
+    'CO+': u.Quantity( 64, 'AA'),
+    'BC': u.Quantity( 61, 'AA'),
+    'C2': u.Quantity(119, 'AA'),
+    'GC': u.Quantity( 56, 'AA'),
+    'H2O+': u.Quantity(164, 'AA'),
+    'RC': u.Quantity( 58, 'AA')
+}
+
+
+# Table VI of Farnham et al. 2000
+F_0 = {  # Zero magnitude flux density
+    'OH': u.Quantity(1.0560e-7, 'W/(m2 um)'),
+    'NH': u.Quantity(8.420e-8, 'W/(m2 um)'),
+    'CN': u.Quantity(8.6e-8, 'W/(m2 um)'),
+    'C3': u.Quantity(8.160e-8, 'W/(m2 um)'),
+    'CO+': u.Quantity(7.323e-8, 'W/(m2 um)'),
+    'C2': u.Quantity(3.887e-8, 'W/(m2 um)'),
+    'H2O+': u.Quantity(1.380e-8, 'W/(m2 um)'),
+    'UC': u.Quantity(7.802e-8, 'W/(m2 um)'),
+    'BC': u.Quantity(6.210e-8, 'W/(m2 um)'),
+    'GC': u.Quantity(3.616e-8, 'W/(m2 um)'),
+    'RC': u.Quantity(1.316e-8, 'W/(m2 um)'),
+    'R': u.Quantity(2.177e-8, 'W/(m2 um)')  # Bessell 1998
+}
+
+MmBC_sun = {  # M - BC for the Sun
+    'OH': 1.791,
+    'NH': 1.188,
+    'CN': 1.031,
+    'C3': 0.497,
+    'CO+': 0.338,
+    'C2': -0.423,
+    'H2O+': -1.249,
+    'UC': 1.101,
+    'BC': 0.000,
+    'GC': -0.507,
+    'RC': -1.276,
+    'R': -0.90  # from solar mags, below
+}
+
+gamma_XX_XX = {
+    'OH': u.Quantity(1.698e-2, '1/AA'),
+    'NH': u.Quantity(1.907e-2, '1/AA'),
+    'CN': u.Quantity(1.812e-2, '1/AA'),
+    'C3': u.Quantity(3.352e-2, '1/AA'),
+    'CO+': u.Quantity(1.549e-2, '1/AA'),
+    'C2': u.Quantity(5.433e-3, '1/AA'),
+    'H2O+': u.Quantity(5.424e-3, '1/AA')
+}
+
+gamma_prime_XX_XX = {
+    'OH': 0.98,
+    'NH': 0.99,
+    'CN': 0.99,
+    'C3': 0.19,
+    'CO+': 0.99,
+    'C2': 0.66,
+    'H2O+': 1.00
+}
+
+Msun = {  # apparent magnitude of the Sun, based on Appendix D
+    'UC': -25.17,
+    'BC': -26.23,
+    'GC': -26.77,
+    'RC': -27.44,
+    'R': -27.13,  # Bessell 1998
+}
+
+S0 = {  # solar flux density at 1 AU, based on Appendix D
+    'UC': u.Quantity(908.9, 'W/(m2 um)'),
+    'BC': u.Quantity(1934, 'W/(m2 um)'),
+    'GC': u.Quantity(1841, 'W/(m2 um)'),
+    'RC': u.Quantity(1250, 'W/(m2 um)'),
+    'R': u.Quantity(1534, 'W/(m2 um)')  # Bessell 1998
+}
+
+def cal_oh(oh, oh_unc, OH, z_true, b, c, E_bc, h, guess=(20, 0.15),
+           covar=False):
+    """OH calibration coefficients.
+
+    Considers Rayleigh and ozone components for the OH filter.
+
+    Solves Eq. 10 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    oh, oh_unc : array
+        Instrumental (uncalibrated) OH magnitude and uncertainty.
+    OH : array
+        Calibrated OH magnitude.
+    z_true : Angle or Quantity
+        True zenith angle.
+    b : array
+        Coefficient sets to use for `ext_rayleigh_oh`.
+    c : array
+        Coefficient sets to use for `ext_ozone_oh`.
+    E_bc : float
+        BC airmass extinction. [mag/airmass]
+    h : Quantity
+        The observer's elevation.
+    guess : array, optional
+        An initial guess for the fitting algorithm: OH zero point and
+        ozone amount.
+    covar : bool, optional
+        Set to `True` to return the covariance matrix.
+
+    Returns
+    -------
+    A : ndarray
+        The OH magnitude zero point and ozone amount.
+    unc or cov : ndarray
+        Uncertainties on each parameter, based on least-squares fitting,
+        or the covariance matrix, if `covar` is `True`.
+
+    """
+
+    from scipy.optimize import leastsq
+
+    assert np.size(E_bc) == 1
+
+    def chi(A, oh, oh_unc, OH, z_true, b, c, E_bc, h):
+        zp, toz = A
+        toz = np.abs(toz)
+        model = OH - zp + ext_total_oh(toz, z_true, b, c, E_bc, h)
+        chi = (oh - model) / oh_unc
+        return chi
+
+    args = (np.array(oh), np.array(oh_unc), np.array(OH),
+            z_true, b, c, E_bc, h)
+    output = leastsq(chi, guess, args=args, full_output=True, epsfcn=1e-5)
+
+    fit = output[0]
+    fit[1] = np.abs(fit[1])  # toz is positive
+    cov = output[1]
+    err = np.sqrt(np.diag(cov))
+
+    if covar:
+        return fit, cov
+    else:
+        return fit, err
+
+def continuum_color(w0, m0, m0_unc, w1, m1, m1_unc, s0=None, s1=None):
+    """Comet continuum color.
+
+    The color in percent per 0.1 um is 10**(-0.4 * Rm).
+
+    Parameters
+    ----------
+    w0 : string or Quantity
+        The shorter wavelength filter name, or the effective wavelength.
+        May be any of UC, BC, GC, RC, or R.
+    m0 : float
+        The apparent magnitude through the shorter wavelength filter.
+    m0_unc : float
+        The uncertainty in `m0`.
+    w1, m1, m1_unc : various
+        The same as above, but for the longer wavelength filter.
+    s0, s1 : float, optional
+        The apparent magnitude of the Sun through each filter; only
+        needed when `w0` or `w1` is a wavelength.
+
+    Returns
+    -------
+    Rm, Rm_unc : Quantity
+        The color in magnitudes per 0.1 um, and uncertainty.
+
+    """
+
+    from ..calib import solar_flux
+
+    if isinstance(w0, str):
+        w0 = w0.upper()
+    if isinstance(w1, str):
+        w1 = w1.upper()
+
+    if w0 in filters and w1 in filters:
+        assert cw[w1] > cw[w0], 'w0 must be the shorter wavelength bandpass'
+        dw = (cw[w1] - cw[w0]).to(0.1 * u.um)
+        ci = MmBC_sun[w0] - MmBC_sun[w1]
+        Rm = (m0 - m1 - ci) * u.mag / dw
+        Rm_unc = np.sqrt(m0_unc**2 + m1_unc**2) * u.mag / dw
+        return Rm, Rm_unc
+
+    if w0 in Msun:
+        s0 = Msun[w0]
+        w0 = cw[w0]
+    if w1 in Msun:
+        s1 = Msun[w1]
+        w1 = cw[w1]
+
+    assert isinstance(w0, u.Quantity)
+    assert w0.unit.is_equivalent(u.m)
+    assert isinstance(w1, u.Quantity)
+    assert w1.unit.is_equivalent(u.m)
+    assert w0 < w1
+    assert s0 is not None
+    assert s1 is not None
+
+    dw = (w1 - w0).to(0.1 * u.um)
+    ci = s0 - s1
+    Rm = (m0 - m1 - ci) * u.mag / dw
+    Rm_unc = np.sqrt(m0_unc**2 + m1_unc**2) * u.mag / dw
+    return Rm, Rm_unc
+
+def ext_aerosol_bc(E_bc, h):
+    """Aerosol extinction for BC filter.
+
+    E_A_BC, Eq. 13 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    E_bc : float
+        The total linear extinction in the BC filter.
+    h : Quantity
+        The observer's elevation.
+
+    Returns
+    -------
+    E : array
+        The extinction in magnitudes.
+
+    """
+
+    return E_bc - 0.2532 * np.exp(-h.to(u.km).value / 7.5)
+
+def ext_aerosol_oh(E_bc, h):
+    """Aerosol extinction for OH filter.
+
+    E_A_OH, Eq. 14 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    E_bc : float
+        The total linear extinction in the BC filter.
+    h : Quantity
+        The observer's elevation.
+
+    Returns
+    -------
+    E : array
+        The extinction in magnitudes.
+
+    """
+
+    #return (3097 / 4453.)**-0.8 * E_aerosol_bc(E_bc, h)
+    return 1.33712 * ext_aerosol_bc(E_bc, h)
+
+def ext_ozone_oh(z_true, toz, c):
+    """Ozone extinction for OH filter.
+
+    G_O_OH, Eq. 15 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    z_true : Angle or Quantity
+        The source true zenith angle.
+    toz : float
+        The amount of ozone.  0.15 is a good starting
+        guess.
+    c : string or 4x4 array
+        The ozone c_ij coefficients.
Use 'B', 'G', 'OH', or '25%' for
+        the corresponding coefficients from Table VIII of Farnham et
+        al. (2000).
+
+    Returns
+    -------
+    G : array
+        The extinction in magnitudes.
+
+    """
+
+    if isinstance(c, str):
+        assert c.upper() in ['B', 'G', 'OH', '25%'], 'Invalid c name'
+        if c.upper() == 'B':
+            c = np.array([[ 1.323e-2, -1.605e-1,  4.258e-1,  9.099e-1],
+                          [-1.731e-2,  3.273,    -2.815e-1, -2.221],
+                          [ 5.349e-3, -5.031e-2, -4.182e-1,  1.649],
+                          [ 2.810e-4, -1.195e-2,  1.063e-1, -3.877e-1]])
+        elif c.upper() == 'G':
+            c = np.array([[ 2.880e-2, -3.912e-1,  1.597,    -7.460e-1],
+                          [-5.284e-2,  3.753,    -2.632,     8.912e-1],
+                          [ 2.634e-2, -3.380e-1,  8.699e-1,  4.253e-2],
+                          [-3.141e-3,  3.359e-2, -9.235e-2, -1.677e-1]])
+        elif c.upper() == 'OH':
+            c = np.zeros((4, 4))
+            c[1, 0] = -1.669e-3
+            c[1, 1] = 3.365
+            c[1, 2] = -2.973e-2
+            c[2, 0] = 3.521e-4
+            c[2, 1] = -6.662e-3
+            c[2, 2] = -5.335e-2
+        elif c.upper() == '25%':
+            c = np.array([[-2.523e-2,  4.382e-1, -2.415,     4.993],
+                          [ 4.276e-2,  2.538,     4.109,    -8.518],
+                          [-2.510e-2,  4.148e-1, -2.405,     4.666],
+                          [ 5.070e-3, -8.168e-2,  4.272e-1, -8.618e-1]])
+    else:
+        c = np.array(c)
+        assert c.shape == (4, 4), "c should have shape (4, 4)"
+
+    a = np.matrix(c) * np.matrix([[1, toz, toz**2, toz**3]]).T
+    a = np.squeeze(np.array(a))
+
+    X = airmass_loc(z_true)
+    if np.size(X) == 1:
+        G = np.dot(a, np.r_[1, X, X**2, X**3])
+    else:
+        G = np.r_[[np.dot(a, np.r_[1, x, x**2, x**3]) for x in X]]
+
+    return G
+
+def ext_rayleigh_oh(z_true, h, b):
+    """Rayleigh extinction for OH filter.
+
+    G_R_OH, Eq. 11 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    z_true : Angle or Quantity
+        The source true zenith angle.
+    h : Quantity
+        The observer's elevation.
+    b : string or 2-element array
+        The Rayleigh b_i coefficients.  Use 'B', 'G', 'OH', or '25%' for
+        the corresponding coefficients from Table VIII of Farnham et
+        al. (2000).
+
+    Returns
+    -------
+    G : array
+        The extinction in magnitudes.
+
+    """
+
+    if isinstance(b, str):
+        b = b.upper()
+        assert b in ['B', 'G', 'OH', '25%'], 'Invalid b name'
+        if b == 'B':
+            b = np.r_[1.159, -4.433e-4]
+        elif b == 'G':
+            b = np.r_[1.158, -5.359e-4]
+        elif b == 'OH':
+            b = np.r_[1.170, 0]
+        elif b == '25%':
+            b = np.r_[1.168, -1.918e-4]
+    else:
+        b = np.array(b)
+        assert b.shape == (2,), "b should have shape (2,)"
+
+    X = airmass_app(z_true, h)
+    e = np.exp(-h.to(u.km).value / 7.5)
+    if np.size(X) == 1:
+        G = np.dot(b, [X, X**2]) * e
+    else:
+        G = np.r_[[np.dot(b, [x, x**2]) for x in X]] * e
+
+    return G
+
+def ext_total_oh(toz, z_true, b, c, E_bc, h):
+    """Total OH extinction.
+
+    G_R_OH + E_A_OH + G_O_OH, Eq. 10 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    toz : float
+        Amount of ozone.
+    z_true : Angle or Quantity
+        True zenith angle.
+    b : array
+        Coefficient sets to use for `ext_rayleigh_oh`.
+    c : array
+        Coefficient sets to use for `ext_ozone_oh`.
+    E_bc : float
+        BC airmass extinction. [mag/airmass]
+    h : Quantity
+        The observer's elevation.
+
+    Returns
+    -------
+    G : array
+        The total extinction in magnitudes.
+
+    """
+
+    return (ext_rayleigh_oh(z_true, h, b)
+            + ext_aerosol_oh(E_bc, h) * airmass_app(z_true, h)
+            + ext_ozone_oh(z_true, toz, c))
+
+def fluxd_continuum(bc, bc_unc, Rm, Rm_unc, filt):
+    """Extrapolate BC continuum to another filter.
+
+    Table VI, Eqs. 34-40 and 42 of Farnham et al. 2000.
+
+    Parameters
+    ----------
+    bc, bc_unc : float
+        Observed BC magnitude and uncertainty.
+ Rm, Rm_unc : float or Quantity + Observed color, in units of magnitudes per 1000 A, and uncertainty. + filt : string + Name of the other filter: OH, NH, CN, C3, CO+, C2. + + Returns + ------- + fc, fc_unc : Quantity + Continuum flux density and uncertainty. + + """ + + Rm = u.Quantity(Rm, '10 mag / um').value + Rm_unc = u.Quantity(Rm_unc, '10 mag / um').value + + filt = filt.upper() + if filt == 'OH': + fc = (10**(-0.4 * bc) * 10**(-0.4 * MmBC_sun[filt]) + * 10**(-0.5440 * Rm)) + m_unc = np.sqrt(bc_unc**2 + (0.544 / 0.4 * Rm_unc)**2) + fc_unc = fc * m_unc / 1.0857 + else: + raise ValueError('{} not yet implemented.'.format(filt)) + + fc *= F_0[filt] + fc_unc *= F_0[filt] + return fc, fc_unc + +def flux_oh(oh, oh_unc, bc, bc_unc, Rm, Rm_unc, zp, toz, z_true, E_bc, h): + """Flux from OH. + + Appendix A and D of Farnham et al. 2000. + + Parameters + ---------- + oh, oh_unc : float + OH instrumental magnitude and uncertainty. + bc, bc_unc : float + BC instrumental magnitude and uncertainty. + Rm, Rm_unc : float or Quantity + Continuum color in units of magnitudes per 0.1 um, and + uncertainty. + zp : float + OH magnitude zero point. + toz : float + Amount of ozone. + z_true : Angle or Quantity + True zenith angle. + E_bc : float + BC airmass extinction. [mag/airmass] + h : Quantity + The observer's elevation. + + Returns + ------- + E_tot, E_unc : float + Total OH extinction and uncertainty. + m_oh, m_oh_unc : float + Calibrated OH apparent magnitude and uncertainty (including + continuum). + f_oh, f_oh_unc : Quantity + Total band flux from OH and uncertainty. + + Notes + ----- + 1) Compute extinction for pure OH and 25% continuum cases. + + 2) Use pure OH extinction to compute band flux. + + 3) From step 2, compute percent continuum contribution, and + linearly interpolate between the two cases of step 1 to + determine actual extinction. + + 4) Given extinction from step 3, re-compute band flux. + + """ + + E_0 = ext_total_oh(toz, z_true, 'OH', 'OH', E_bc, h) + E_25 = ext_total_oh(toz, z_true, '25%', '25%', E_bc, h) + E_100 = ext_total_oh(toz, z_true, 'G', 'G', E_bc, h) + fc, fc_unc = fluxd_continuum(bc, bc_unc, Rm, Rm_unc, 'OH') + + f = 10**(-0.4 * (oh + zp - E_0)) * F_0['OH'] + f_unc = oh_unc * f / 1.0857 # first estimate + + frac = fc / f # fraction that is continuum + frac_unc = np.sqrt(f_unc**2 * fc**2 + fc_unc**2 * f**2) / f**2 + #E_unc = frac_unc / frac * 1.0857 + #E_unc = np.sqrt(oh_unc**2 + ((fc_unc / fc) * 1.0857)**2) + + if frac <= 0.25: + E_tot = 4 * ((0.25 - frac) * E_0 + frac * E_25) + E_unc = 4 * (E_0 + E_25) * frac_unc + else: + #assert frac < 0.25, "Continuum = {:%}, more than 25% of observed OH band flux density. Untested code.".format(frac) + #print "Etot", 4 * ((0.25 - frac) * E_0 + frac * E_25), E_tot + # the following yields the same as above at the <0.001 mag level + E_tot = ((1 - frac) * E_25 + (frac - 0.25) * E_100) / 0.75 + E_unc = np.abs(E_100 - E_25) * frac_unc / 0.75 + + m = oh + zp - E_tot + m_unc = np.sqrt(oh_unc**2 + E_unc**2) + + f = F_0['OH'] * 10**(-0.4 * m) + f_unc = m_unc * f / 1.0857 + + f_oh, f_oh_unc = flux_gas(f, f_unc, fc, fc_unc, 'OH') + return E_tot, E_unc, m, m_unc, f_oh, f_oh_unc + +def flux_gas(f, f_unc, fc, fc_unc, filt): + """Gas emission band total flux. + + Table VI and Eqs. 45-51 of Farnham et al. 2000. + + Parameters + ---------- + f, f_unc : float or Quantity + The observed flux density through the gas filter and + uncertainty. 
+    fc, fc_unc : float or Quantity
+        The estimated continuum flux density through the gas filter and
+        uncertainty.
+    filt : str
+        The name of the gas filter.
+
+    Returns
+    -------
+    fgas, fgas_unc : Quantity
+        The total gas flux and uncertainty.
+
+    """
+
+    filt = filt.upper()
+    assert filt in filters
+
+    if isinstance(f, u.Quantity):
+        f_unc = u.Quantity(f_unc, f.unit)
+        fc = u.Quantity(fc, f.unit)
+        fc_unc = u.Quantity(fc_unc, f.unit)
+    elif isinstance(fc, u.Quantity):
+        fc_unc = u.Quantity(fc_unc, fc.unit)
+        f = u.Quantity(f, fc.unit)
+        f_unc = u.Quantity(f_unc, fc.unit)
+
+    if filt in ['OH', 'C3', 'C2', 'H2O+']:
+        fgas = (f - fc) / gamma_XX_XX[filt]
+        fgas_unc = np.sqrt(f_unc**2 + fc_unc**2) / gamma_XX_XX[filt]
+    else:
+        raise ValueError('{} not yet implemented.'.format(filt))
+
+    return fgas.decompose([u.W, u.m]), fgas_unc.decompose([u.W, u.m])
+
+# update module docstring
+from ..util import autodoc
+autodoc(globals())
+del autodoc
diff --git a/mskpy/util.py b/mskpy/util.py
index 11cf3db..46b0261 100644
--- a/mskpy/util.py
+++ b/mskpy/util.py
@@ -25,6 +25,8 @@
 
    Optimizations
    -------------
+   gaussfit
+   glfit
    linefit
    planckfit
 
@@ -35,6 +37,7 @@
    leading_num_key
    nearest
    takefrom
+   stat_avg
    whist
 
 Spherical/Celestial/vectorial geometry
@@ -45,6 +48,7 @@
    spherical_coord_rotate
    state2orbit
    vector_rotate
+   xyz2lb
 
 Statistics
 ----------
@@ -94,6 +98,7 @@
    autodoc
    spectral_density_sb
    timesten
+   write_table
 
 """
@@ -113,6 +118,8 @@
     'fitslog',
     'getrot',
 
+    'gaussfit',
+    'glfit',
     'linefit',
     'planckfit',
 
@@ -120,6 +127,7 @@
     'groupby',
     'leading_num_key',
     'nearest',
+    'stat_avg',
     'takefrom',
     'whist',
 
@@ -129,6 +137,7 @@
     'spherical_coord_rotate',
     'state2orbit',
     'vector_rotate',
+    'xyz2lb',
 
     'kuiper',
     'kuiperprob',
@@ -169,7 +178,8 @@
     'asValue',
     'autodoc',
     'spectral_density_sb',
-    'timesten'
+    'timesten',
+    'write_table'
 ]
 
 def archav(y):
@@ -605,27 +615,27 @@ def getrot(h):
     # Does CDELTx exist?
     cdelt = np.zeros(2)
     cdeltDefined = False
-    if (h.has_key('CDELT1') and h.has_key('CDELT2')):
+    if (('CDELT1' in h) and ('CDELT2' in h)):
         # these keywords take precedence over the CD matrix
         cdeltDefined = True
         cdelt = np.array([h['CDELT1'], h['CDELT2']])
 
     # Transformation matrix?
     tmDefined = False
-    if (h.has_key('CD1_1') and h.has_key('CD1_2') and
-        h.has_key('CD2_1') and h.has_key('CD2_2')):
+    if (('CD1_1' in h) and ('CD1_2' in h) and
+        ('CD2_1' in h) and ('CD2_2' in h)):
         tmDefined = True
         cd = np.array(((h['CD1_1'], h['CD1_2']),
                        (h['CD2_1'], h['CD2_2'])))
 
-    if (h.has_key('PC1_1') and h.has_key('PC1_2') and
-        h.has_key('PC2_1') and h.has_key('PC2_2')):
+    if (('PC1_1' in h) and ('PC1_2' in h) and
+        ('PC2_1' in h) and ('PC2_2' in h)):
         tmDefined = True
         cd = np.array(((h['PC1_1'], h['PC1_2']),
                        (h['PC2_1'], h['PC2_2'])))
 
     if not tmDefined:
         # if CDELT is defined but the transformation matrix isn't,
         # then CROT should be defined
-        if cdeltDefined and h.has_key('CROTA2'):
+        if cdeltDefined and ('CROTA2' in h):
             rot = h['CROTA2']
             return cdelt, rot
 
@@ -663,6 +673,100 @@
     return cdelt * 3600.0, np.degrees(rot1)
 
+def gaussfit(x, y, err, guess, covar=False):
+    """A quick Gaussian fitting function.
+
+    Parameters
+    ----------
+    x, y : array
+        The independent and dependent variables.
+    err : array
+        `y` errors, set to `None` for unweighted fitting.
+    guess : tuple
+        Initial guess: `(amplitude, mu, sigma)`.
+    covar : bool, optional
+        Set to `True` to return the covariance matrix rather than the
+        error.
+
+    Returns
+    -------
+    fit : tuple
+        Best-fit parameters.
+    err or cov : tuple or ndarray
+        Errors on the fit or the covariance matrix of the fit (see
+        `covar` keyword).
+
+    """
+
+    from scipy.optimize import leastsq
+
+    def chi(p, x, y, err):
+        A, mu, sigma = p
+        model = A * gaussian(x, mu, sigma)
+        chi = (np.array(y) - model) / np.array(err)
+        return chi
+
+    if err is None:
+        err = np.ones(len(y))
+
+    output = leastsq(chi, guess, args=(x, y, err), full_output=True,
+                     epsfcn=1e-4)
+    fit = output[0]
+    cov = output[1]
+    err = np.sqrt(np.diag(cov))
+
+    if covar:
+        return fit, cov
+    else:
+        return fit, err
+
+def glfit(x, y, err, guess, covar=False):
+    """A quick Gaussian + line fitting function.
+
+    Parameters
+    ----------
+    x, y : array
+        The independent and dependent variables.
+    err : array
+        `y` errors, set to `None` for unweighted fitting.
+    guess : tuple
+        Initial guess: `(amplitude, mu, sigma, m, b)`.
+    covar : bool, optional
+        Set to `True` to return the covariance matrix rather than the
+        error.
+
+    Returns
+    -------
+    fit : tuple
+        Best-fit parameters.
+    err or cov : tuple or ndarray
+        Errors on the fit or the covariance matrix of the fit (see
+        `covar` keyword).
+
+    """
+
+    from scipy.optimize import leastsq
+
+    def chi(p, x, y, err):
+        A, mu, sigma, m, b = p
+        model = A * gaussian(x, mu, sigma) + m * x + b
+        chi = (np.array(y) - model) / np.array(err)
+        return chi
+
+    if err is None:
+        err = np.ones(len(y))
+
+    output = leastsq(chi, guess, args=(x, y, err), full_output=True,
+                     epsfcn=1e-4)
+    fit = output[0]
+    cov = output[1]
+    err = np.sqrt(np.diag(cov))
+
+    if covar:
+        return fit, cov
+    else:
+        return fit, err
+
 def linefit(x, y, err, guess, covar=False):
     """A quick line fitting function.
 
@@ -752,14 +856,16 @@ def chi(p, wave, fluxd, err):
 
     output = leastsq(chi, guess, args=(wave, fluxd, err),
                      full_output=True, epsfcn=1e-3)
     fit = output[0]
     cov = output[1]
-    err = np.sqrt(np.diag(cov))
 
     if covar:
         return fit, cov
     else:
-        return fit, err
+        if cov is None:
+            return fit, None
+        else:
+            return fit, np.sqrt(np.diag(cov))
 
 def between(a, limits, closed=True):
     """Return True for elements within the given limits.
 
@@ -893,6 +1000,52 @@ def nearest(array, v):
     """
     return np.abs(np.array(array) - v).argmin()
 
+def stat_avg(x, y, u, N):
+    """Bin an array, weighted by measurement errors.
+
+    Parameters
+    ----------
+    x : array
+        The independent variable.
+    y : array
+        The parameter to average.
+    u : array
+        The uncertainties on `y`; weights are `1 / u**2`.
+    N : int
+        The number of points per bin.  The right-most bin may contain
+        fewer than `N` points.
+
+    Returns
+    -------
+    bx, by, bu : ndarray
+        The binned data.  The `x` data is straight averaged (unweighted).
+    n : ndarray
+        The number of points in each bin.
+
+    """
+
+    nbins = x.size // N
+    remainder = x.size % N
+    shape = (nbins, N)
+
+    w = 1.0 / np.array(u)**2
+    if remainder == 0:
+        _w = w.reshape(shape)
+        _x = np.array(x).reshape(shape)
+        _y = np.array(y).reshape(shape)
+    else:
+        _w = w[:-remainder].reshape(shape)
+        _x = np.array(x)[:-remainder].reshape(shape)
+        _y = np.array(y)[:-remainder].reshape(shape)
+
+    _x = _x.mean(1)
+    _y = (_y * _w).sum(1) / _w.sum(1)
+    _u = np.sqrt(1.0 / _w.sum(1))
+
+    n = np.ones(len(_x)) * N
+    if remainder > 0:
+        # weighted mean of the leftover points
+        _x = np.r_[_x, np.mean(x[-remainder:])]
+        _y = np.r_[_y, (np.array(y[-remainder:]) * w[-remainder:]).sum()
+                       / w[-remainder:].sum()]
+        _u = np.r_[_u, np.sqrt(1.0 / w[-remainder:].sum())]
+        n = np.r_[n, remainder]
+
+    return _x, _y, _u, n
+
 def takefrom(arrays, indices):
     """Return elements from each array at the given indices.
 
@@ -1283,6 +1436,34 @@ def rot(r, nhat, theta):
     else:
         return np.array([rot(r, nhat, t) for t in th])
 
+def xyz2lb(r):
+    """Transform a vector to angles.
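+
+    Computes longitude and latitude from a Cartesian vector:
+    `lam = arctan2(y, x)`, `bet = arctan2(z, sqrt(x**2 + y**2))`.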
+
+    Parameters
+    ----------
+    r : array
+        The vector, shape = (3,) or (n, 3).
+
+    Returns
+    -------
+    lam : float or array
+        Longitude. [degrees]
+    bet : float or array
+        Latitude. [degrees]
+
+    """
+
+    r = np.array(r)
+    if r.ndim == 1:
+        lam = np.arctan2(r[1], r[0])
+        bet = np.arctan2(r[2], np.sqrt(r[0]**2 + r[1]**2))
+    else:
+        # assume it is an array of vectors
+        lam = np.arctan2(r[:, 1], r[:, 0])
+        bet = np.arctan2(r[:, 2], np.sqrt(r[:, 0]**2 + r[:, 1]**2))
+
+    return np.degrees(lam), np.degrees(bet)
+
 def kuiper(x, y):
     """Compute Kuiper's statistic and probability.
 
@@ -2703,5 +2884,40 @@ def timesten(v, sigfigs):
     s = r"${0}\times10^{{{1:d}}}$".format(s[0], int(s[1]))
     return s
 
+def write_table(fn, tab, header, comments=[], **kwargs):
+    """Write an astropy Table with a simple header.
+
+    Parameters
+    ----------
+    fn : string
+        The name of the file to write to.
+    tab : astropy Table
+        The table to write.
+    header : dict
+        A dictionary of keywords to save, or `None`.  Use an
+        `OrderedDict` to preserve header keyword order.
+    comments : list
+        A list of comments to add to the top of the file.  Each line
+        will be prepended with a comment character.
+    **kwargs
+        Keyword arguments for `tab.write()`.  Default format is
+        'ascii.fixed_width_two_line'.
+
+    """
+
+    format = kwargs.pop('format', 'ascii.fixed_width_two_line')
+    with open(fn, 'w') as outf:
+        outf.write("# {}\n#\n".format(date2time(None).iso))
+
+        for c in comments:
+            outf.write("# {}\n".format(c))
+
+        if header is not None:
+            for k, v in header.items():
+                outf.write("# {} = {}\n".format(k, str(v)))
+
+        outf.write('#\n')
+
+        tab.write(outf, format=format, **kwargs)
+
 # summarize the module
 autodoc(globals())
diff --git a/scripts/ephemeris b/scripts/ephemeris
index e6c6d6a..ad26565 100755
--- a/scripts/ephemeris
+++ b/scripts/ephemeris
@@ -1,14 +1,23 @@
 #!/usr/bin/python
 from __future__ import print_function
+import sys
 import argparse
 import datetime
-from mskpy import ephem, util, between
+import astropy.units as u
+import mskpy
+from mskpy import ephem, util, between, Coma
 
 today = datetime.date.today()
 
+class ListAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        v = [float(x) for x in values.split(',')]
+        setattr(namespace, self.dest, v)
+
 parser = argparse.ArgumentParser(description='Generate an ephemeris.')
 parser.add_argument('target', type=str, action='store', nargs='*',
                     help='The name of the target')
+
 parser.add_argument('--start', type=str, action='store',
                     default=today.isoformat(),
                     help='The first day of the ephemeris, YYYY-MM-DD [today].')
@@ -16,10 +25,34 @@
 parser.add_argument('--end', type=str, action='store', default=None,
                     help='The last day of the ephemeris, YYYY-MM-DD [today + n].')
 parser.add_argument('-n', type=int, action='store', default=30,
                     help='The number of days for the ephemeris.')
-parser.add_argument('--selong', type=str, action='store', default='0,180',
-                    help='The allowed solar elongation limits.')
+
 parser.add_argument('--observer', type=str, action='store', default='Earth',
                     help='The observer.')
+parser.add_argument('--selong', type=str, action=ListAction, default=[0, 180],
+                    help='The allowed solar elongation limits.')
+
+parser.add_argument('--kernel', type=str, action='append', default=[],
+                    help='Load this kernel.')
+
+parser.add_argument('--afrho', type=float, action='store', default=None,
+                    help='Afrho parameter at phase angle = 0 [cm].')
+parser.add_argument('-k', type=float, action='store', default=-2.0,
+                    help='Afrho heliocentric distance power-law slope.')
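+# --afrho applies at the heliocentric distance given by --rh; it is
+# scaled to 1 AU below via afrho1 = afrho / rh**k.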
+parser.add_argument('--rh', type=float, action='store', default=1.0,
+                    help='Afrho parameter specified at this rh [AU].')
+parser.add_argument('--ef2af', type=float, action='store', default=3.5,
+                    help='The ratio of IR emissivity to albedo.')
+parser.add_argument('--tscale', type=float, action='store', default=1.1,
+                    help='Effective temperature scale factor.')
+parser.add_argument('--phasef', type=str, action='store', default='phaseHM',
+                    help='Phase function: phaseK, phaseH, phaseHM.')
+parser.add_argument('--wave', type=str, action=ListAction,
+                    default=[0.6, 2.2, 10, 20],
+                    help='Wavelengths for estimated flux densities.')
+parser.add_argument('--rap', type=float, action='store', default=1.0,
+                    help='Aperture radius [arcsec].')
+parser.add_argument('--unit', type=str, action='store', default='Jy',
+                    help='Flux density unit.')
 
 args = parser.parse_args()
 if args.target == []:
@@ -36,15 +69,43 @@
 else:
     end = args.end
     n = int(util.cal2time(end).jd - util.cal2time(args.start).jd + 1)
 
+if len(args.kernel) > 0:
+    for k in args.kernel:
+        ephem.core.load_kernel(k)
+
 target = ephem.getspiceobj(' '.join(args.target))
 try:
     observer = eval('ephem.' + args.observer.capitalize())
 except AttributeError:
     observer = ephem.getspiceobj(' '.join(args.observer))
 
-selong_limits = [float(x) for x in args.selong.split(',')]
+ephemeris_only = args.afrho is None
+
+arg_list = ['start', 'end', 'n', 'observer', 'selong']
+if ephemeris_only:
+    t = target.ephemeris(observer, [args.start, end], num=n)
+else:
+    arg_list.extend(['afrho', 'k', 'rh', 'ef2af', 'tscale', 'phasef', 'wave',
+                     'rap', 'unit'])
+    afrho1 = args.afrho / args.rh**args.k * u.cm
+    coma = Coma(target.state, afrho1, k=args.k,
+                ef2af=args.ef2af, Tscale=args.tscale,
+                phasef=getattr(mskpy.models, args.phasef))
+    t = coma.lightcurve(observer, [args.start, end], args.wave * u.um, num=n,
+                        rap=args.rap * u.arcsec, unit=u.Unit(args.unit),
+                        verbose=False)
+
+t = t[between(t['selong'], args.selong)]
+
+print("""# ephemeris
+# target = {}
+#""".format(' '.join(args.target)))
+for k in arg_list:
+    if k[0] == '_':
+        continue
+
+    print("# {} = {}".format(k, getattr(args, k)))
 
-t = target.ephemeris(observer, [args.start, end], num=n)
-t = t[between(t['selong'], selong_limits)]
+print("#")
 t.pprint(max_width=-1, max_lines=-1)
diff --git a/scripts/transit b/scripts/transit
new file mode 100755
index 0000000..05a69c5
--- /dev/null
+++ b/scripts/transit
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+from __future__ import print_function
+import sys
+import argparse
+import datetime
+import itertools
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import ticker
+import astropy.units as u
+import mskpy
+from mskpy import getspiceobj, date2time, cal2time
+from mskpy import ephem, Sun, Earth
+from mskpy.observing import plot_transit_time
+
+today = datetime.date.today()
+
+class ListAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        v = [x.strip() for x in ' '.join(values).split(',')]
+        setattr(namespace, self.dest, v)
+
+parser = argparse.ArgumentParser(description='Generate a plot of transit times.')
+parser.add_argument('target', type=str, action=ListAction, nargs='*',
+                    help='Comma-separated list of targets.')
+
+parser.add_argument('--start', type=str, action='store',
+                    default=today.isoformat(),
+                    help='The first day, YYYY-MM-DD [today].')
+parser.add_argument('--end', type=str, action='store', default=None,
+                    help='The last day, YYYY-MM-DD [start + n].')
+parser.add_argument('-n',
type=int, action='store', default=365,
+                    help='The number of days to plot, if end is not specified.')
+
+parser.add_argument('--observer', type=str, action='store', default='Earth',
+                    help='The observer.')
+
+parser.add_argument('-o', type=str, action='store', default='transit.pdf',
+                    help='Plot file name.')
+
+args = parser.parse_args()
+if args.target == ['']:
+    parser.print_help()
+    print()
+    exit()
+
+start = cal2time(args.start)
+if args.end is None:
+    end = datetime.date.fromordinal(start.datetime.toordinal()
+                                    + args.n).isoformat()
+    n = args.n + 1
+else:
+    end = args.end
+    n = int(cal2time(end).jd - start.jd + 1)
+
+targets = [ephem.getspiceobj(t) for t in args.target]
+
+try:
+    observer = eval('ephem.' + args.observer.capitalize())
+except AttributeError:
+    observer = ephem.getspiceobj(' '.join(args.observer))
+
+date_range = date2time([start, end])
+jd = np.arange(date_range[0].jd, date_range[1].jd)
+if jd[-1] != date_range[1].jd:
+    jd = np.concatenate((jd, [date_range[1].jd]))
+dates = date2time(jd)
+g_sun = Earth.observe(Sun, jd)
+
+fig = plt.figure(1, (10, 10))
+fig.clear()
+ax = plt.gca()
+
+style = itertools.cycle(itertools.product(['-', '--', '-.', ':'], 'bgrcym'))
+for target in targets:
+    ls, color = next(style)
+    plot_transit_time(target, g_sun, observer=observer, ax=ax,
+                      color=color, ls=ls)
+
+plt.setp(ax, xlim=[-7, 7], xlabel='Transit time (hr)')
+
+f = lambda x, pos: mskpy.dh2hms(x % 24.0, '{:02d}:{:02d}')
+plt.setp(ax.xaxis, minor_locator=ticker.AutoMinorLocator(),
+         major_formatter=ticker.FuncFormatter(f))
+
+#ax.invert_yaxis()
+mskpy.nicelegend(frameon=False, loc='upper left', bbox_to_anchor=(1.0, 0.95))
+mskpy.niceplot(axfs='10')
+fig.subplots_adjust(right=0.8)
+plt.draw()
+plt.savefig(args.o)
diff --git a/setup.py b/setup.py
index 7844b2f..61ea5f0 100755
--- a/setup.py
+++ b/setup.py
@@ -42,14 +42,16 @@ def find_data_files():
     files = find_data_files()
 
     setup(name='mskpy',
-          version='2.2.4',
+          version='2.3.0',
           description='General purpose and astronomy related tools',
           author="Michael S. Kelley",
          author_email="msk@astro.umd.edu",
           url="https://github.com/mkelley/mskpy",
           packages=['mskpy', 'mskpy.lib', 'mskpy.models', 'mskpy.image',
-                    'mskpy.ephem', 'mskpy.instruments', 'mskpy.observing'],
+                    'mskpy.ephem', 'mskpy.instruments', 'mskpy.observing',
+                    'mskpy.photometry'],
           data_files=files,
+          scripts=['scripts/ephemeris', 'scripts/transit'],
           requires=['numpy', 'scipy', 'astropy'],
           ext_modules=[ext1],
           cmdclass={'test': PyTest, 'install': my_install},