diff --git a/hera_cal/frf.py b/hera_cal/frf.py
index 7a1e65ac5..745389adb 100644
--- a/hera_cal/frf.py
+++ b/hera_cal/frf.py
@@ -4,6 +4,7 @@
 import numpy as np
 from hera_filters import dspec
+from hera_cal.noise import predict_noise_variance_from_autos
 from . import utils
 from scipy.interpolate import interp1d
@@ -1998,3 +1999,250 @@ def time_average_argparser():
     ap.add_argument("--equalize_interleave_ntimes", action="store_true", default=False, help=desc)
 
     return ap
+
+def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
+                       t_avg=300., eigenval_cutoff=1e-9, weights=None,
+                       coherent_avg=True, wgt_tavg_by_nsample=True,
+                       nsamples=None, rephase=True, bl_vec=None,
+                       lat=-30.721526120689507, dlst=None):
+    """
+    Get the FRF + coherent time average operator for noise covariance
+    calculation. Composes time averaging and the main lobe FRF into
+    a single operation.
+
+    Parameters:
+        times (array):
+            (Interleaved) Julian dates from the HERAData object, converted to seconds.
+        filter_cent_use (float):
+            Filter center for main lobe filter, in Hz.
+        filter_half_wid_use (float):
+            Filter half width for main lobe filter, in Hz.
+        freqs (array):
+            Frequencies in the data (in Hz).
+        t_avg (float):
+            Desired coherent averaging length, in seconds.
+        eigenval_cutoff (float):
+            Eigenvalue cutoff for DPSS modes.
+        weights (array):
+            Array of weights to use for main lobe filter.
+            None (default) uses uniform weights.
+        coherent_avg (bool):
+            Whether to coherently average on the t_avg cadence or not.
+        wgt_tavg_by_nsample (bool):
+            Whether to weight the time average by nsamples.
+            Otherwise do uniform weighting.
+        nsamples (array):
+            Array proportional to how many nights contributed to each sample.
+        rephase (bool):
+            Whether to rephase before averaging.
+        bl_vec (array):
+            Baseline vector in meters in the ENU coordinate system.
+        lat (float):
+            Array latitude in degrees North.
+        dlst (float or array_like):
+            Amount of LST to rephase by, in radians. If float, rephases all
+            times by the same amount.
+
+    Returns:
+        frop (array):
+            Filter operator. Shape (Ntimes_coarse, Ntimes_fine, Nfreqs).
+ """ + + # Time handling is a slightly modified port from hera_filters/hera_cal + + Ntimes = len(times) + Nfreqs = len(freqs) + + dmatr, _ = dspec.dpss_operator(times, np.array([filter_cent_use, ]), + np.array([filter_half_wid_use, ]), + eigenval_cutoff=np.array([eigenval_cutoff, ])) + Nmodes = dmatr.shape[-1] + + if weights is None: + weights = np.ones([Ntimes, Nfreqs]) + elif weights.shape != (Ntimes, Nfreqs): + raise ValueError(f"weights has wrong shape {weights.shape} " + f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}" + " May need to be sliced along an axis.") + + #####Index Legend##### + # a = DPSS mode # + # f = frequency # + # t = fine time # + # T = coarse time # + #####Index Legend##### + + ddagW = dmatr.T.conj()[:, np.newaxis] * weights.T # aft + ddagWd = ddagW @ dmatr # afa + lsq = np.linalg.solve(ddagWd.swapaxes(0,1), ddagW.swapaxes(0,1)) # fat + + if coherent_avg: + dtime = np.median(np.abs(np.diff(times))) + chunk_size = int(np.round((t_avg / dtime))) + Nchunk = int(np.ceil(Ntimes / chunk_size)) + chunk_remainder = Ntimes % chunk_size + + tavg_weights = nsamples if wgt_tavg_by_nsample else np.where(weights, np.ones([Ntimes, Nfreqs]), 0) + if tavg_weights.shape != (Ntimes, Nfreqs): + raise ValueError(f"nsamples has wrong shape {nsamples.shape} " + f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}" + " May need to be sliced along an axis.") + if chunk_remainder > 0: # Stack some 0s that get 0 weight so we can do the reshaping below without incident + + dmatr_stack_shape = [chunk_size - chunk_remainder, Nmodes] + weights_stack_shape = [chunk_size - chunk_remainder, Nfreqs] + dmatr = np.vstack((dmatr, np.zeros(dmatr_stack_shape, dtype=complex))) + tavg_weights = np.vstack((tavg_weights, np.zeros(weights_stack_shape, dtype=complex))) + dlst = np.append(dlst, np.zeros(chunk_size - chunk_remainder, dtype=float)) + + dres = dmatr.reshape(Nchunk, chunk_size, Nmodes) + wres = tavg_weights.reshape(Nchunk, chunk_size, Nfreqs) + wnorm = wres.sum(axis=1)[:, np.newaxis] + # normalize for an average + wres = np.where(wnorm > 0, wres / wnorm, 0) + + # Apply the rephase to the weights matrix since it's mathematically equivalent and convenient + if rephase: + wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs), + bl_vec, freqs, dlst, lat=lat, inplace=False) + wres = wres.reshape(Nchunk, chunk_size, Nfreqs) + + # does "Ttf,Tta->Tfa" much faster than einsum and fancy indexing + dchunk = np.zeros([Nchunk, Nfreqs, Nmodes], dtype=complex) + for coarse_time_ind in range(Nchunk): + dchunk[coarse_time_ind] = np.tensordot(wres[coarse_time_ind].T, + dres[coarse_time_ind], + axes=1) + + # does "Tfa,fat->Ttf" faster than einsum and fancy indexing + frop = np.zeros([Nchunk, Ntimes, Nfreqs], dtype=complex) + for freq_ind in range(Nfreqs): + frop[:, :, freq_ind] = np.tensordot(dchunk[:, freq_ind], + lsq[freq_ind], + axes=1) + else: + # ta,fat->ttf + frop = np.tensordot(dmatr, lsq.transpose(1, 2, 0), axes=1) + + return frop + +def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice, + auto_ant=None, default_value=0.): + """ + Wrapper around hera_cal.noise.predict_noise_variance_from_autos that preps + the noise variance calculation for FRF + coherent average. + + Parameters: + data: DataContainer + Must contain the cross_antpairpol in question as well as some + corresponding autos. + nsamples: DataContainer + Contains the nsamples associated with the cross_antpairpol in question. 
+
+def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
+                      auto_ant=None, default_value=0.):
+    """
+    Wrapper around hera_cal.noise.predict_noise_variance_from_autos that preps
+    the noise variance calculation for the FRF + coherent average.
+
+    Parameters:
+        data: DataContainer
+            Must contain the cross_antpairpol in question as well as some
+            corresponding autos.
+        nsamples: DataContainer
+            Contains the nsamples associated with the cross_antpairpol in question.
+        weights: DataContainer
+            Contains the weights associated with the cross_antpairpol in question.
+        cross_antpairpol: tuple
+            Tuple whose first two entries are antennas and last entry is a
+            polarization.
+        freq_slice: slice
+            A slice into the frequency axis of the data that all gets filtered
+            identically (a PS analysis band).
+        auto_ant: int
+            If not None, should be a single integer specifying a single
+            antenna's autos (this is used because the redundantly averaged data
+            have a single auto file for all antennas).
+        default_value: float
+            The default variance to use in locations with 0 nsamples to avoid
+            nans.
+
+    Returns:
+        var: array
+            Noise variance. Shape (Ntimes, Nfreqs).
+    """
+    var = predict_noise_variance_from_autos(cross_antpairpol, data,
+                                            nsamples=nsamples, auto_ant=auto_ant)
+    var = var[:, freq_slice]
+
+    var_isnotfinite = ~np.isfinite(var)
+    if np.any(var_isnotfinite):
+        # Check that all the infs/nans are weighted to zero, then replace them with the default value
+        all_nonfinite_zero = np.all(weights[cross_antpairpol][:, freq_slice][var_isnotfinite] == 0)
+        if not all_nonfinite_zero:
+            warnings.warn("Not all nonfinite variance locations are of zero weight!")
+
+        print(f"Replacing nonfinite variances with default value: {default_value}")
+        var[var_isnotfinite] = default_value
+
+    return var
+
+def get_FRF_cov(frop, var):
+    """
+    Get the noise covariance from the noise variance and a given FRF + coherent average operator.
+
+    Parameters:
+        frop: array
+            A linear operator that performs the FRF and coherent averaging
+            operations in one step.
+        var: array
+            Noise variance for a single antpairpol. Shape (Ntimes_fine, Nfreqs).
+
+    Returns:
+        cov: array
+            The desired covariance. Shape (Nfreqs, Ntimes_coarse, Ntimes_coarse).
+    """
+    Nfreqs = frop.shape[2]
+    Ncoarse = frop.shape[0]
+    cov = np.zeros([Nfreqs, Ncoarse, Ncoarse], dtype=complex)
+    for freq_ind in range(Nfreqs):
+        cov[freq_ind] = np.tensordot((frop[:, :, freq_ind] * var[:, freq_ind]),
+                                     frop[:, :, freq_ind].T.conj(), axes=1)
+
+    return cov
+
+def get_corr(cov):
+    """
+    Get correlation matrices from a sequence of covariance matrices.
+
+    Parameters:
+        cov: array
+            The covariance matrices.
+    Returns:
+        corr: array
+            The corresponding correlation matrices.
+    """
+    Ntimes = cov.shape[1]
+    diags = cov[:, np.arange(Ntimes), np.arange(Ntimes)]
+    corr = cov / np.sqrt(diags[:, None] * diags[:, :, None])
+
+    return corr
+
+def get_correction_factor_from_cov(cov, tslc=None):
+    """
+    Get a correction factor for PS noise variance prediction in the absence
+    of propagating the noise covariances all the way to delay space. It is
+    based on HERA memo #132, which shows that one may calculate an
+    effective number of degrees of freedom from the variance of a
+    generalized chi-square random variable.
+
+    Parameters:
+        cov: array
+            The time-time covariance matrix at every frequency.
+        tslc: slice
+            A time slice to use in case some times should be excluded from the
+            calculation of this factor.
+
+    Returns:
+        correction_factor: float
+            The correction factor.
+    """
+    corr = get_corr(cov)
+
+    if tslc is None:
+        Ncotimes = corr.shape[1]
+        Neff = Ncotimes**2 / np.sum(np.abs(corr)**2, axis=(1, 2))
+    else:
+        Ncotimes = tslc.stop - tslc.start
+        Neff = Ncotimes**2 / np.sum(np.abs(corr[:, tslc, tslc])**2, axis=(1, 2))
+    correction_factor = np.mean(Ncotimes / Neff)  # Average over frequencies, since they are independent.
+
+    return correction_factor
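Reviewer note: the covariance and correction-factor helpers can be exercised independently of any real filter by feeding them stand-in arrays. A minimal runnable sketch (shapes are arbitrary; frop and var are random placeholders with no physical meaning):

import numpy as np
from hera_cal import frf

rng = np.random.default_rng(0)
Ncoarse, Nfine, Nfreqs = 4, 24, 3
frop = rng.normal(size=(Ncoarse, Nfine, Nfreqs)) + 0j  # stand-in operator
var = rng.exponential(size=(Nfine, Nfreqs))            # stand-in noise variance

cov = frf.get_FRF_cov(frop, var)  # (Nfreqs, Ncoarse, Ncoarse), Hermitian
corr = frf.get_corr(cov)          # unit diagonal by construction
factor = frf.get_correction_factor_from_cov(cov)

The factor collapses the time-time correlation structure into a single number via Neff = Ncotimes**2 / sum(|corr|**2), averaged over frequency, mirroring the code above.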
diff --git a/hera_cal/noise.py b/hera_cal/noise.py
index f73db8752..69ba8259a 100644
--- a/hera_cal/noise.py
+++ b/hera_cal/noise.py
@@ -68,7 +68,8 @@ def infer_dt(times_by_bl, bl, default_dt=None):
     raise ValueError('Cannot infer dt when all len(times_by_bl) == 1 or fewer.')
 
 
-def predict_noise_variance_from_autos(bl, data, dt=None, df=None, nsamples=None):
+def predict_noise_variance_from_autos(bl, data, dt=None, df=None, nsamples=None,
+                                      auto_ant=None):
     '''Predict the noise variance on a baseline using autocorrelation data
     using the formula sigma^2 = Vii * Vjj / Delta t / Delta nu.
 
@@ -81,6 +82,11 @@ def predict_noise_variance_from_autos(bl, data, dt=None, df=None, nsamples=None)
             from the frequencies stored in the DataContainer
         nsamples: DataContainer mapping bl tuples to numpy arrays of the number of
             integrations for that given baseline. Must include nsamples[bl].
+        auto_ant: int
+            In some cases, such as redundant averaging, the autos corresponding
+            to the individual antennas are not available. In this case, the
+            user should use this keyword to specify a single antenna whose
+            autos will be used to derive this statistic.
 
     Returns:
         Noise variance predicted on baseline bl in units of data squared.
@@ -92,7 +98,11 @@ def predict_noise_variance_from_autos(bl, data, dt=None, df=None, nsamples=None)
         df = np.median(np.ediff1d(data.freqs))
 
     ap1, ap2 = split_pol(bl[2])
-    auto_bl1, auto_bl2 = (bl[0], bl[0], join_pol(ap1, ap1)), (bl[1], bl[1], join_pol(ap2, ap2))
+    if auto_ant is None:
+        auto_bl1, auto_bl2 = (bl[0], bl[0], join_pol(ap1, ap1)), (bl[1], bl[1], join_pol(ap2, ap2))
+    else:
+        auto_bl1, auto_bl2 = (auto_ant, auto_ant, join_pol(ap1, ap1)), (auto_ant, auto_ant, join_pol(ap2, ap2))
+
     var = np.abs(data[auto_bl1] * data[auto_bl2] / dt / df)
     if nsamples is not None:
         return var / nsamples[bl]
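Reviewer note: the new auto_ant path can be exercised against the same test file the tests below use, assuming the packaged hera_cal test data are available:

import os
from hera_cal import io, noise
from hera_cal.data import DATA_PATH

uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
hd = io.HERAData([uvh5])
data, flags, nsamples = hd.read()

# Default: draws from the (53, 53, 'ee') and (54, 54, 'ee') autos
var = noise.predict_noise_variance_from_autos((53, 54, 'ee'), data, nsamples=nsamples)

# Redundantly averaged case: derive both auto terms from antenna 53 alone
var_red = noise.predict_noise_variance_from_autos((53, 54, 'ee'), data,
                                                  nsamples=nsamples, auto_ant=53)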
diff --git a/hera_cal/tests/test_frf.py b/hera_cal/tests/test_frf.py
index 45a758ad7..fe663d3a5 100644
--- a/hera_cal/tests/test_frf.py
+++ b/hera_cal/tests/test_frf.py
@@ -17,8 +17,9 @@
 from scipy import stats
 from scipy import constants
 from pyuvdata import UVFlag, UVBeam
+from hera_filters import dspec
 from .. import utils
-from .. import datacontainer, io, frf
+from .. import datacontainer, io, frf, noise
 from ..data import DATA_PATH
 
 import warnings
@@ -185,9 +186,9 @@ def test_filter_data(self):
         bl = (24, 25, 'ee')
         window = 'blackmanharris'
         ec = 0
-        np.random.seed(0)
-        self.F.data[bl] = np.reshape(stats.norm.rvs(0, 1, self.F.Ntimes * self.F.Nfreqs)
-                                     + 1j * stats.norm.rvs(0, 1, self.F.Ntimes * self.F.Nfreqs), (self.F.Ntimes, self.F.Nfreqs))
+        rng = np.random.default_rng(seed=0)
+        self.F.data[bl] = np.reshape(rng.normal(0, 1, self.F.Ntimes * self.F.Nfreqs)
+                                     + 1j * rng.normal(0, 1, self.F.Ntimes * self.F.Nfreqs), (self.F.Ntimes, self.F.Nfreqs))
 
         # fr filter noise
         self.F.filter_data(self.F.data, frps, overwrite=True, verbose=False, axis=0, keys=[bl])
@@ -1066,3 +1067,245 @@ def test_tophat_linear_argparser(self):
     assert a.max_frate_coeffs[1] == -0.229
     assert a.time_thresh == 0.05
     assert not a.factorize_flags
+
+def test_get_frop_for_noise():
+    uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
+    hd = io.HERAData([uvh5])
+    bl = (53, 54, 'ee')
+    data, flags, nsamples = hd.read(bls=(53, 54, 'ee'))
+    times = data.times * 24 * 3600
+
+    # Have to get some FRF parameters, but getting realistic ones is cumbersome
+    # This file has ~600 s of data, so the fringe-rate resolution is only ~1.7 mHz
+    # Integration time is 10 s, so the Nyquist fringe rate is ~50 mHz
+    # This is a short baseline, so its fringe-rate profile lives in the first bin or so
+    # Just filter around 0 and see that it's similar to what the pipeline gives
+    filt_cent = 0
+    filt_hw = 0.005
+    eval_cutoff = 1e-12
+
+    # Make random weights for extra fun
+    rng = np.random.default_rng(seed=1)
+    weights = rng.exponential(size=data.shape)
+
+
+    frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                  freqs=data.freqs, weights=weights,
+                                  coherent_avg=False,
+                                  eigenval_cutoff=eval_cutoff)
+
+    frf_dat = (frop * data[bl]).sum(axis=1)
+    frf_dat_pipeline = copy.deepcopy(data)
+
+    # From main_beam_FR_filter in single_baseline_notebook
+    # TODO: graduate that code into hera_cal and put here
+    d_mdl = np.zeros_like(data[bl])
+
+    d_mdl, _, _ = dspec.fourier_filter(times, data[bl],
+                                       wgts=weights, filter_centers=[filt_cent],
+                                       filter_half_widths=[filt_hw],
+                                       mode='dpss_solve',
+                                       eigenval_cutoff=[eval_cutoff],
+                                       suppression_factors=[eval_cutoff],
+                                       max_contiguous_edge_flags=len(data.times),
+                                       filter_dims=0)
+    frf_dat_pipeline[bl] = d_mdl
+
+    assert np.allclose(frf_dat_pipeline[bl], frf_dat)
+
+    # Now do coherent averaging
+    # TODO: These lines are non-interleaved versions of some lines in the
+    # single-baseline PS notebook. They seem useful -- maybe just make them
+    # standard functions?
+    dt = times[1] - times[0]
+    Navg = int(np.round(300. / dt))
+    n_avg_int = int(np.ceil(len(data.lsts) / Navg))
+    target_lsts = [np.mean(np.unwrap(data.lsts)[i * Navg:(i + 1) * Navg])
+                   for i in range(n_avg_int)]
+    dlst = [target_lsts[i] - l for i in range(n_avg_int)
+            for l in np.unwrap(data.lsts)[i * Navg:(i + 1) * Navg]]
+    bl_vec = data.antpos[bl[0]] - data.antpos[bl[1]]
+
+
+    frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                  freqs=data.freqs, weights=weights,
+                                  coherent_avg=True,
+                                  eigenval_cutoff=eval_cutoff, dlst=dlst,
+                                  nsamples=nsamples[bl], t_avg=300.,
+                                  bl_vec=bl_vec)
+    frf_dat = (frop * data[bl]).sum(axis=1)
+
+    utils.lst_rephase(frf_dat_pipeline, {bl: bl_vec}, data.freqs, dlst,
+                      lat=hd.telescope_location_lat_lon_alt_degrees[0],
+                      inplace=True)
+    avg_data = frf.timeavg_waterfall(frf_dat_pipeline[bl], Navg,
+                                     flags=np.zeros_like(flags[bl]),
+                                     nsamples=nsamples[bl],
+                                     extra_arrays={'times': data.times},
+                                     lsts=data.lsts, freqs=data.freqs,
+                                     rephase=False, bl_vec=bl_vec,
+                                     verbose=False)[0]
+
+    assert np.allclose(avg_data, frf_dat)
+
+    # Test uniform weights
+    weights = np.ones_like(data[bl])
+    frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                  freqs=data.freqs, weights=weights,
+                                  coherent_avg=False,
+                                  eigenval_cutoff=eval_cutoff)
+    d_mdl, _, _ = dspec.fourier_filter(times, data[bl],
+                                       wgts=weights,
+                                       filter_centers=[filt_cent],
+                                       filter_half_widths=[filt_hw],
+                                       mode='dpss_solve',
+                                       eigenval_cutoff=[eval_cutoff],
+                                       suppression_factors=[eval_cutoff],
+                                       max_contiguous_edge_flags=len(data.times),
+                                       filter_dims=0)
+    frf_dat_pipeline[bl] = d_mdl
+    frf_dat = (frop * data[bl]).sum(axis=1)
+    assert np.allclose(frf_dat_pipeline[bl], frf_dat)
+
+    # Check that setting weights to None does the same as uniform weights
+    frop_none = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                       freqs=data.freqs, weights=None,
+                                       coherent_avg=False,
+                                       eigenval_cutoff=eval_cutoff)
+    assert np.array_equal(frop, frop_none)
+
+def test_prep_var_for_frop():
+    uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
+    hd = io.HERAData([uvh5])
+    data, flags, nsamples = hd.read()
+    # The first 37 freqs give 0 variance, so just exclude them to make the rest
+    # of the test more transparent
+    freq_slice = slice(37, 1024)
+    cross_antpairpol = (53, 54, 'ee')
+    weights = copy.deepcopy(nsamples)
+
+    # This should return the exact same thing as the original noise prediction function
+    var_for_frop = frf.prep_var_for_frop(data, nsamples, weights,
+                                         cross_antpairpol, freq_slice)
+    var = noise.predict_noise_variance_from_autos(cross_antpairpol, data, nsamples=nsamples)
+
+    assert np.array_equal(var[:, freq_slice], var_for_frop)
+
+    # Now tell it to only use one antenna; they should all be different
+    var_for_frop = frf.prep_var_for_frop(data, nsamples, weights,
+                                         cross_antpairpol, freq_slice,
+                                         auto_ant=53)
+
+    assert np.logical_not(np.any(var_for_frop == var[:, freq_slice]))
+
+    # Now give it some nsamples == 0 to deal with
+    # Should replace all of one channel with a peculiarly chosen value of 2.3
+    nsamples[cross_antpairpol][:, 48] = 0
+    with pytest.warns(UserWarning,
+                      match="Not all nonfinite variance locations are of zero weight!"):
+        var_for_frop = frf.prep_var_for_frop(data, nsamples, weights,
+                                             cross_antpairpol, freq_slice,
+                                             auto_ant=53,
+                                             default_value=2.3)
+    assert np.all(var_for_frop[:, 48 - 37] == 2.3)
+
+def test_get_FRF_cov():
+    uvh5 = os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5")
+    hd = io.HERAData([uvh5])
+    data, flags, nsamples = hd.read()
+    # The first 37 freqs give 0 variance, so just exclude them to make the rest
+    # of the test more transparent
+    freq_slice = slice(37, 1024)
+    cross_antpairpol = (53, 54, 'ee')
+    weights = copy.deepcopy(nsamples)
+    times = data.times * 24 * 3600
+    eval_cutoff = 1e-12
+
+    var_for_frop = frf.prep_var_for_frop(data, nsamples, weights,
+                                         cross_antpairpol, freq_slice,
+                                         auto_ant=53)
+
+    dt = times[1] - times[0]
+    Navg = int(np.round(300. / dt))
+    n_avg_int = int(np.ceil(len(data.lsts) / Navg))
+    target_lsts = [np.mean(np.unwrap(data.lsts)[i * Navg:(i + 1) * Navg])
+                   for i in range(n_avg_int)]
+    dlst = [target_lsts[i] - l for i in range(n_avg_int)
+            for l in np.unwrap(data.lsts)[i * Navg:(i + 1) * Navg]]
+    bl_vec = data.antpos[cross_antpairpol[0]] - data.antpos[cross_antpairpol[1]]
+
+    # Need to give it some complex structure
+    filt_cent = 0.005
+    filt_hw = 0.005
+    frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                  freqs=data.freqs[freq_slice],
+                                  weights=weights[cross_antpairpol][:, freq_slice],
+                                  coherent_avg=True,
+                                  eigenval_cutoff=eval_cutoff, dlst=dlst,
+                                  nsamples=nsamples[cross_antpairpol][:, freq_slice],
+                                  t_avg=300., bl_vec=bl_vec)
+
+    cov = frf.get_FRF_cov(frop, var_for_frop)
+
+    # Check that at least it's (close to) Hermitian at every frequency
+    # Tests for value sensibility exist in the single baseline PS notebook
+    for freq_ind in range(1024 - 37):
+        assert np.allclose(cov[freq_ind], cov[freq_ind].T.conj())
+
+
+    # Pretend we forgot to slice things
+    with pytest.raises(ValueError, match="nsamples has wrong shape"):
+        frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                      freqs=data.freqs[freq_slice],
+                                      weights=weights[cross_antpairpol][:, freq_slice],
+                                      coherent_avg=True,
+                                      eigenval_cutoff=eval_cutoff,
+                                      dlst=dlst,
+                                      nsamples=nsamples[cross_antpairpol],
+                                      t_avg=300., bl_vec=bl_vec)
+
+    with pytest.raises(ValueError, match="weights has wrong shape"):
+        frop = frf.get_frop_for_noise(times, filt_cent, filt_hw,
+                                      freqs=data.freqs[freq_slice],
+                                      weights=weights[cross_antpairpol],
+                                      coherent_avg=True,
+                                      eigenval_cutoff=eval_cutoff,
+                                      dlst=dlst,
+                                      nsamples=nsamples[cross_antpairpol],
+                                      t_avg=300., bl_vec=bl_vec)
+
+def test_get_corr_and_factor():
+    # Actually just going to test an easy analytic case here
+    base = np.array([[2, 1, 0],
+                     [1, 3, 1],
+                     [0, 1, 4]])
+    cov = np.array([base, 2 * base])
+    corr = frf.get_corr(cov)
+
+    answer = np.array([[1, 1/np.sqrt(6), 0],
+                       [1/np.sqrt(6), 1, 1/np.sqrt(12)],
+                       [0, 1/np.sqrt(12), 1]])
+
+    answer = np.array([answer, answer])
+
+    assert np.allclose(corr, answer)
+
+    factor = frf.get_correction_factor_from_cov(cov)
+
+    # A block diagonal "implementation"
+    blocklen = np.prod(cov.shape[:2])
+    sum_sq = 7
+    Neff = blocklen**2 / sum_sq
+    factor_answer = blocklen / Neff
+
+    assert np.allclose(factor, factor_answer)
+
+    tslc = slice(1, 3)
+    factor_slice = frf.get_correction_factor_from_cov(cov, tslc=tslc)
+
+    blocklen = 4
+    sum_sq = 2 * (2 + 2 / 12)
+    Neff = blocklen**2 / sum_sq
+    factor_slice_answer = blocklen / Neff
+
+    assert np.allclose(factor_slice, factor_slice_answer)
\ No newline at end of file
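Reviewer note: the analytic numbers in test_get_corr_and_factor can be reproduced by hand from the 3x3 base block. A short standalone check (pure numpy, mirroring the per-block arithmetic; the two frequency blocks yield the same factor because they differ only by an overall scale):

import numpy as np

base = np.array([[2., 1., 0.],
                 [1., 3., 1.],
                 [0., 1., 4.]])
d = np.sqrt(np.diag(base))
corr = base / np.outer(d, d)  # the test's `answer` block: 1/sqrt(6), 1/sqrt(12) off-diagonal

sum_sq = np.sum(corr**2)      # 3 + 2/6 + 2/12 = 3.5 per block
Neff = 3**2 / sum_sq          # per-frequency effective degrees of freedom
print(3 / Neff)               # 7/6 ~ 1.1667, the expected correction factor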
diff --git a/hera_cal/tests/test_redcal.py b/hera_cal/tests/test_redcal.py
index 5a63e31ee..120edfbd2 100644
--- a/hera_cal/tests/test_redcal.py
+++ b/hera_cal/tests/test_redcal.py
@@ -306,7 +306,8 @@ def test_init(self):
         antpos = linear_array(NANTS)
         reds = om.get_reds(antpos, pols=['ee'], pol_mode='1pol')
         info = om.RedundantCalibrator(reds)
-        gains, true_vis, d = sim_red_data(reds, gain_scatter=.05)
+        rng = np.random.default_rng(21)
+        gains, true_vis, d = sim_red_data(reds, gain_scatter=.05, rng=rng)
         w = dict([(k, 1.) for k in d.keys()])
         meta, sol = info.logcal(d)
 
@@ -729,7 +730,8 @@ def test_omnical(self):
         antpos = linear_array(NANTS)
         reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
         info = om.RedundantCalibrator(reds)
-        gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999)
+        rng = np.random.default_rng(21)
+        gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999, rng=rng)
         w = dict([(k, 1.) for k in d.keys()])
         sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
         sol0.update(info.compute_ubls(d, sol0))
@@ -761,7 +763,8 @@ def test_omnical64(self):
         antpos = linear_array(NANTS)
         reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
         info = om.RedundantCalibrator(reds)
-        gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
+        rng = np.random.default_rng(21)
+        gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999, rng=rng)
         w = dict([(k, 1.) for k in d.keys()])
         sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
         sol0.update(info.compute_ubls(d, sol0))
@@ -792,7 +795,8 @@ def test_omnical128(self):
         antpos = linear_array(NANTS)
         reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
         info = om.RedundantCalibrator(reds)
-        gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999)
+        rng = np.random.default_rng(21)
+        gains, true_vis, d = sim_red_data(reds, shape=(2, 1), gain_scatter=.0099999, rng=rng)
         w = dict([(k, 1.) for k in d.keys()])
         sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
         sol0.update(info.compute_ubls(d, sol0))
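Reviewer note: the test changes in this patch consistently replace global seeding (np.random.seed plus scipy's stats.norm.rvs) with a local numpy Generator, and sim_red_data correspondingly takes the generator through its rng keyword in the calls above. The pattern in isolation (values arbitrary):

import numpy as np

# Old pattern: hidden global state, shared by every caller in the process
np.random.seed(0)
legacy = np.random.normal(0, 1, 4)

# New pattern: an explicit, local Generator each test can seed independently
rng = np.random.default_rng(seed=0)
modern = rng.normal(0, 1, 4)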