-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathmap_focalplane_corrfun.py
124 lines (111 loc) · 4.01 KB
/
map_focalplane_corrfun.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
# Map out the 2d correlation function of the focalplane, assuming it's
# stationary. See average_cov.py if you want the full ndet*ndet covariance,
# which does not have this assumption.
import numpy as np, os, multiprocessing
from enlib import utils, mpi, config, errors, fft, array_ops, scanutils, enmap
from enact import filedb, actdata
# Command-line interface; defaults are taken from the user's ~/.enkirc.
parser = config.ArgumentParser(os.environ["HOME"]+"/.enkirc")
parser.add_argument("sel")       # TOD selection string, passed to filedb.scans
parser.add_argument("template")  # map whose shape/wcs define the output geometry
parser.add_argument("ofile")     # output file for the binned correlation maps
parser.add_argument("--nbin", type=int, default=100)  # number of frequency bins
parser.add_argument("--fmax", type=float, default=10) # highest frequency covered by the bins (presumably Hz; same unit as d.srate)
parser.add_argument("--ntod", type=int, default=0)    # if > 0, only process this many tod-groups (for testing)
parser.add_argument("--nmulti",type=int, default=1)   # number of worker processes per MPI task
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-p", "--pairdiff",action="store_true") # analyse detector-pair differences instead of raw detectors
args = parser.parse_args()
filedb.init()
comm = mpi.COMM_WORLD
dtype = np.float32
nbin = args.nbin
fmax = args.fmax
# Read our template
template = enmap.read_map(args.template)
# Group into ar1+ar2+... groups
ids = filedb.scans[args.sel]
groups = scanutils.get_tod_groups(ids)
if args.ntod: groups = groups[:args.ntod]
# Our output arrays: per-frequency-bin accumulators with the template's
# geometry. rhs accumulates correlation-weighted hits, div plain hits.
rhs = enmap.zeros((nbin,)+template.shape[-2:], template.wcs, dtype)
div = rhs*0
def project_mat(pix, template, mat=None):
	"""Bin the entries of the detector-pair matrix mat by the relative pixel
	offset of each detector pair, producing a map with template's geometry.
	pix is [{y,x},ndet] integer pixel positions. If mat is omitted, every
	pair gets unit weight, so the result is a pair-count (hit) map."""
	# Default: unit weight for every detector pair
	if mat is None:
		mat = np.full([pix.shape[-1], pix.shape[-1]], 1.0)
	pix    = np.asarray(pix)
	oshape = template.shape[-2:]
	# Relative offsets, recentered on the middle of the map
	center = np.asarray(oshape)/2
	relpix = pix[:,:,None] - pix[:,None,:] + center[:,None,None]
	# Flatten pair axes so each pair becomes one sample to bin
	relpix  = relpix.reshape(2,-1)
	weights = np.asarray(mat).reshape(-1)
	binned  = utils.bin_multi(relpix, oshape, weights=weights)
	return enmap.samewcs(binned, template)
def find_pairs_blind(det_pos, tol=0.2*utils.arcmin):
	"""Find detector pairs purely from focalplane positions: detectors within
	tol of each other are grouped, and only groups with exactly two members
	are kept. Returns an [npair,2] index array into det_pos."""
	groups = utils.find_equal_groups(det_pos, tol=tol)
	# Discard singletons and larger clumps - only true doublets count as pairs
	doublets = [group for group in groups if len(group) == 2]
	return np.array(doublets)
def to_pairdiff(d, pairs):
	"""Reduce the data object d to pair-differenced form: the tod becomes the
	difference of each pair's members, the cuts become the union of each
	pair's cuts, and the detector list is restricted to the first member of
	each pair. pairs is an [npair,2] index array. Returns the modified d."""
	first, second = pairs[:,0], pairs[:,1]
	diff_tod = d.tod[first] - d.tod[second]
	# A sample is cut in the difference if it is cut in either member
	merged_cuts = {}
	for name in ["cut","cut_basic","cut_noiseest"]:
		merged_cuts[name] = d[name][first] + d[name][second]
	d = d.restrict(dets=d.dets[first])
	d.tod = diff_tod
	for name, cut in merged_cuts.items():
		d[name] = cut
	return d
# Loop through and analyse each tod-group, distributed round-robin over
# the MPI tasks. Each task accumulates into its local rhs/div, which are
# summed across tasks afterwards.
for ind in range(comm.rank, len(groups), comm.size):
	ids = groups[ind]
	entries = [filedb.data[id] for id in ids]
	try:
		# Read and calibrate the tod-group; autocut is skipped since we want
		# the raw correlation structure, not an aggressively cut version.
		d = actdata.read(entries, verbose=args.verbose)
		d = actdata.calibrate(d, verbose=args.verbose, exclude=["autocut"])
		if args.pairdiff:
			pairs = find_pairs_blind(d.point_template)
			d = to_pairdiff(d, pairs)
		# Need at least 2 detectors to measure a correlation
		if d.ndet < 2 or d.nsamp < 2: raise errors.DataMissing("No data in tod")
	except (errors.DataMissing, AssertionError, IndexError) as e:
		print "Skipping %s (%s)" % (str(ids), str(e))
		continue
	print "Processing %s: %4d %6d" % (str(ids), d.ndet, d.nsamp)
	# Detach the tod so d no longer holds a reference to the big array
	tod = d.tod
	del d.tod
	# Remove each detector's mean before transforming to frequency domain
	tod -= np.mean(tod,1)[:,None]
	tod = tod.astype(dtype)
	ft = fft.rfft(tod)
	nfreq= ft.shape[-1]
	# Frequency resolution of the rfft bins
	dfreq= d.srate/d.nsamp
	del tod
	# Get the pixel position of each detector in order [{y,x},ndet]
	pix = np.round(template.sky2pix(d.point_template.T[::-1])).astype(int)
	# This operation is slow. We can't use openmp because python is stupid,
	# and it isn't worth it to put this in fortran. So use multiprocessing.
	# NOTE(review): handle_bin is defined locally, which multiprocessing can
	# only ship to workers on fork-based platforms - presumably fine here,
	# but worth confirming; apply_async also silently drops any exception
	# raised in a worker (no .get() is ever called).
	def handle_bin(fbin):
		# Process one frequency bin: average the correlation matrix over the
		# bin's frequency range and project it onto relative-offset maps.
		print fbin
		# [f1,f2): rfft index range covered by this bin, clamped to nfreq-1
		f1,f2 = [min(nfreq-1,int(i*fmax/dfreq/nbin)) for i in [fbin,fbin+1]]
		fsub = ft[:,f1:f2]
		cov = array_ops.measure_cov(fsub)
		# Normalize covariance to a correlation matrix
		std = np.diag(cov)**0.5
		corr = cov / std[:,None] / std[None,:]
		myrhs = project_mat(pix, template, corr)
		mydiv = project_mat(pix, template)
		return fbin, myrhs, mydiv
	# Runs in the parent process: accumulate each worker's result in place.
	# NOTE(review): the parameter name shadows the module-level args namespace
	# inside this function.
	def collect(args):
		fbin, myrhs, mydiv = args
		rhs[fbin] += myrhs
		div[fbin] += mydiv
	p = multiprocessing.Pool(args.nmulti)
	for fbin in range(nbin):
		p.apply_async(handle_bin, [fbin], callback=collect)
	p.close()
	p.join()
	del ft
# Collect the results: sum the per-task accumulators across all MPI tasks
if comm.rank == 0: print "Reducing"
rhs = enmap.samewcs(utils.allreduce(rhs, comm), rhs)
div = enmap.samewcs(utils.allreduce(div, comm), div)
# Normalize by the pair-count map. nowarn suppresses the divide-by-zero
# warnings for relative offsets no detector pair ever hit.
with utils.nowarn():
	map = rhs/div
# Only rank 0 writes, since all tasks hold identical reduced maps
if comm.rank == 0:
	print "Writing"
	enmap.write_map(args.ofile, map)