diff --git a/pipeline/run_all.sh b/legacy/run_all.sh similarity index 100% rename from pipeline/run_all.sh rename to legacy/run_all.sh diff --git a/pipeline/run_ext.sh b/legacy/run_ext.sh similarity index 100% rename from pipeline/run_ext.sh rename to legacy/run_ext.sh diff --git a/pipeline/transfer_validator.py b/legacy/transfer_validator.py similarity index 100% rename from pipeline/transfer_validator.py rename to legacy/transfer_validator.py diff --git a/paramfiles/paramfile_SAT.yaml b/paramfiles/paramfile_SAT.yaml deleted file mode 100644 index 8c4b8ea..0000000 --- a/paramfiles/paramfile_SAT.yaml +++ /dev/null @@ -1,166 +0,0 @@ -# Here is a sample .yaml file that can be used -# to store the metadata necessary to run the -# SO BB pipeline. - -# Define the directories -## Let's start with directories of data products -data_dirs: - root: "../data" - map_directory: "maps" - beam_directory: "beams" - bandpasses_directory: "bandpasses" - mock_directory: "mock_data" -## Then directories in which we will store outputs -output_dirs: - root: "../outputs" - mask_directory: "masks" - pre_process_directory: "pre_processing" - sims_directory: "sims" - cell_transfer_directory: "cells_transfer" - cell_data_directory: "cells_data" - cell_sims_directory: "cells_sims" - coupling_directory: "couplings" - covmat_directory: "covariances" - sacc_directory: "sacc_files" - -################################## -# Metadata related to maps # -# ------------------------------ # -# # -# The structure of input splits # -# is the following: # -# # -# tag: # -# file_root: ... # -# n_splits: ... # -# freq_tag: ... # -# exp_tag: ... # -################################## -map_sets: - SAT1_f093: - file_root: "cmb_sat1_f093" - n_splits: 4 # Number of splits - freq_tag: 93 # Freq. tag (e.g. useful to coadd freqs) - exp_tag: "SAT1" # Experiment tag (useful to get noise-bias free cross-split spectra) - filtering_tag: "mcut30" - SAT1_f145: - file_root: "cmb_sat1_f145" - n_splits: 4 - freq_tag: 145 - exp_tag: "SAT1" - filtering_tag: "mcut15" - -#################### -# Masking metadata # -#################### -masks: - # Load nhits map from disk? Give absolute location here. - # If left blank, the mask handler will download the nominal nhits map. - input_nhits_path: - - # Copies of all masks are stored under these names inside mask_directory - analysis_mask: "analysis_mask.fits" - nhits_map: "nhits_map.fits" - binary_mask: "binary_mask.fits" - galactic_mask_root: "planck_galactic_mask" - point_source_mask: "point_source_mask.fits" - - include_in_mask: [] - gal_mask_mode: "gal070" - apod_radius: 10.0 - apod_radius_point_source: 4.0 - apod_type: "C1" - -#################################### -# Metadata related to the analysis # -#################################### -## General parameters -general_pars: - nside: 256 - lmin: 30 - lmax: 600 - deltal: 10 - binning_file: "binning.npz" - pure_B: True - # Where the beam window is lower than beam_floor, set it to beam_floor - beam_floor: 1.e-2 - -## Simulation related -sim_pars: - anisotropic_noise: False - null_e_modes: False - num_sims: 10 - ## Used for cosmo TF validation and cov sims - cosmology: - cosmomc_theta: 0.0104085 - As: 2.1e-9 - ombh2: 0.02237 - omch2: 0.1200 - ns: 0.9649 - Alens: 1.0 - tau: 0.0544 - r: 0.01 - noise: - survey_years: 5. 
- sensitivity_mode: "baseline" - one_over_f_mode: "optimistic" - - mock_nsrcs: 80 - mock_srcs_hole_radius: 40 - -## Filtering related parameters -filtering: - - slurm: False # Run TOAST filtering locally or with SLURM scheduller - # `slurm_autosubmit` only works if `slurm` is True. - # `slurm_autosubmit` set to True to auto-submit generated sbatch scripts. - # Set to False would generate scripts but not submitted, give you a chance to check generated scripts. - slurm_autosubmit: False - scripts_dir: "../sbatch" # directory of filtering scripts - - ## Define filtering tags - tags_settings: - - # If true, beams will be applied only on the validation simulations. By default (false), beam are applied to both the estimation and validation sims, - # to account for potential effect of the beam on the TF (e.g. commutativity) - do_not_beam_est_sims: False - - mcut30: - filtering_type: "m_filterer" - m_cut: 30 - beam: null # beam null means no beam is applied to TF estimation steps - - mcut15: - filtering_type: "m_filterer" - m_cut: 15 - beam: null - - toast_SAT1_f090: - filtering_type: "toast" - template: "../paramfiles/run_toast.sh.j2" # TOAST template script. - config: "../paramfiles/defaults.toml" # TOAST toml config file - schedule: "../outputs/schedules/schedule_sat.txt" # TOAST schedule file - tf_instrument: "SAT1" # Instrument used for transfer function calculation - tf_band: "SAT_f090" # Band used for transfer function calculation - beam: null - - ## Number of sims for tf estimation and validation - tf_est_num_sims: 10 - - ## Parameters of the PL sims used for TF estimation - power_law_pars_tf_est: - amp: 1.0 - delta_ell: 10 - power_law_index: 2. - - ## Parameters of the PL sims used for TF validation - power_law_pars_tf_val: - amp: - TT: 10. - EE: 1. - BB: 0.05 - TE: 2.5 - TB: 0. - EB: 0. - delta_ell: 10 - power_law_index: 0.5 diff --git a/paramfiles/paramfile_SAT_bbpower.yaml b/paramfiles/paramfile_SAT_bbpower.yaml deleted file mode 100644 index 89f70c0..0000000 --- a/paramfiles/paramfile_SAT_bbpower.yaml +++ /dev/null @@ -1,189 +0,0 @@ -# Here is a sample .yaml file that can be used -# to store the metadata necessary to run the -# SO BB pipeline. - -# Define the directories -## Let's start with directories of data products -data_dirs: - root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/data_planck" - map_directory: "maps" - beam_directory: "beams" - bandpasses_directory: "bandpasses" - mock_directory: "mock_data" -## Then directories in which we will store outputs -output_dirs: - root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/outputs_planck" - mask_directory: "masks" - pre_process_directory: "pre_processing" - sims_directory: "sims" - cell_transfer_directory: "cells_transfer" - cell_data_directory: "cells_data" - cell_sims_directory: "cells_sims" - coupling_directory: "couplings" - covmat_directory: "covariances" - sacc_directory: "sacc_files" - -################################## -# Metadata related to maps # -# ------------------------------ # -# # -# The structure of input splits # -# is the following: # -# # -# tag: # -# file_root: ... # -# n_splits: ... # -# freq_tag: ... # -# exp_tag: ... 
# -################################## -map_sets: - planck_f030: - file_root: "planck_f030" - n_splits: 2 - freq_tag: 30 - exp_tag: "planck" - filtering_tag: "none" - planck_f100: - file_root: "planck_f100" - n_splits: 2 - freq_tag: 100 - exp_tag: "planck" - filtering_tag: "none" - planck_f143: - file_root: "planck_f143" - n_splits: 2 - freq_tag: 143 - exp_tag: "planck" - filtering_tag: "none" - planck_f217: - file_root: "planck_f217" - n_splits: 2 - freq_tag: 217 - exp_tag: "planck" - filtering_tag: "none" - planck_f353: - file_root: "planck_f353" - n_splits: 2 - freq_tag: 353 - exp_tag: "planck" - filtering_tag: "none" - -#################### -# Masking metadata # -#################### -masks: - # Load nhits map from disk? Give absolute location here. - # If left blank, the mask handler will download the nominal nhits map. - input_nhits_path: - - # Copies of all masks are stored under these names inside mask_directory - analysis_mask: "analysis_mask.fits" - nhits_map: "nhits_map.fits" - binary_mask: "binary_mask.fits" - galactic_mask_root: "planck_galactic_mask" - point_source_mask: "point_source_mask.fits" - - include_in_mask: [] - gal_mask_mode: "gal070" - apod_radius: 10.0 - apod_radius_point_source: 4.0 - apod_type: "C1" - -#################################### -# Metadata related to the analysis # -#################################### -## General parameters -general_pars: - nside: 256 - lmin: 30 - lmax: 600 - deltal: 10 - binning_file: "binning.npz" - pure_B: True - # Where the beam window is lower than beam_floor, set it to beam_floor - beam_floor: 1.e-2 - -## Simulation related -sim_pars: - anisotropic_noise: False - null_e_modes: False - num_sims: 100 - ## Used for cosmo TF validation and cov sims - cosmology: - cosmomc_theta: 0.0104085 - As: 2.1e-9 - ombh2: 0.02237 - omch2: 0.1200 - ns: 0.9649 - Alens: 1.0 - tau: 0.0544 - r: 0.01 - noise: - survey_years: 5. - sensitivity_mode: "baseline" - one_over_f_mode: "optimistic" - - mock_nsrcs: 80 - mock_srcs_hole_radius: 40 - -## Filtering related parameters -filtering: - - slurm: False # Run TOAST filtering locally or with SLURM scheduller - # `slurm_autosubmit` only works if `slurm` is True. - # `slurm_autosubmit` set to True to auto-submit generated sbatch scripts. - # Set to False would generate scripts but not submitted, give you a chance to check generated scripts. - slurm_autosubmit: False - scripts_dir: "../sbatch" # directory of filtering scripts - - ## Define filtering tags - tags_settings: - - # If true, beams will be applied only on the validation simulations. By default (false), beam are applied to both the estimation and validation sims, - # to account for potential effect of the beam on the TF (e.g. commutativity) - do_not_beam_est_sims: False - - none: # placeholder for a no-filtering operation - filtering_type: "m_filterer" - m_cut: 0 - beam: null # beam null means no beam is applied to TF estimation steps - - mcut30: - filtering_type: "m_filterer" - m_cut: 30 - beam: null # beam null means no beam is applied to TF estimation steps - - mcut15: - filtering_type: "m_filterer" - m_cut: 15 - beam: null - - toast_SAT1_f090: - filtering_type: "toast" - template: "../paramfiles/run_toast.sh.j2" # TOAST template script. 
- config: "../paramfiles/defaults.toml" # TOAST toml config file - schedule: "../outputs/schedules/schedule_sat.txt" # TOAST schedule file - tf_instrument: "SAT1" # Instrument used for transfer function calculation - tf_band: "SAT_f090" # Band used for transfer function calculation - beam: null - - ## Number of sims for tf estimation and validation - tf_est_num_sims: 30 - - ## Parameters of the PL sims used for TF estimation - power_law_pars_tf_est: - amp: 1.0 - delta_ell: 10 - power_law_index: 2. - - ## Parameters of the PL sims used for TF validation - power_law_pars_tf_val: - amp: - TT: 10. - EE: 1. - BB: 0.05 - TE: 2.5 - TB: 0. - EB: 0. - delta_ell: 10 - power_law_index: 0.5 diff --git a/paramfiles/paramfile_SAT_planck.yaml b/paramfiles/paramfile_SAT_planck.yaml deleted file mode 100644 index c312d12..0000000 --- a/paramfiles/paramfile_SAT_planck.yaml +++ /dev/null @@ -1,151 +0,0 @@ -# Here is a sample .yaml file that can be used -# to store the metadata necessary to run the -# SO BB pipeline. - -# Define the directories -## Let's start with directories of data products -data_dirs: - root: "../data" - map_directory: "maps" - beam_directory: "beams" - bandpasses_directory: "bandpasses" -## Then directories in which we will store outputs -output_dirs: - root: "../outputs" - mask_directory: "masks" - pre_process_directory: "pre_processing" - sims_directory: "sims" - cell_transfer_directory: "cells_transfer" - cell_data_directory: "cells_data" - cell_sims_directory: "cells_sims" - coupling_directory: "couplings" - covmat_directory: "covariances" - -################################## -# Metadata related to maps # -# ------------------------------ # -# # -# The structure of input splits # -# is the following: # -# # -# tag: # -# file_root: ... # -# n_splits: ... # -# freq_tag: ... # -# exp_tag: ... # -################################## -map_sets: - SAT1_f093: - file_root: "cmb_sat1_f093" - n_splits: 2 # Number of splits - freq_tag: 93 # Freq. tag (e.g. useful to coadd freqs) - exp_tag: "SAT1" # Experiment tag (useful to get noise-bias free cross-split spectra) - SAT1_f145: - file_root: "cmb_sat1_f145" - n_splits: 2 - freq_tag: 145 - exp_tag: "SAT1" - # planck_f030: - # file_root: "planck_f030" - # n_splits: 2 - # freq_tag: 30 - # exp_tag: "planck" - planck_f100: - file_root: "planck_f100" - n_splits: 2 - freq_tag: 100 - exp_tag: "planck" - planck_f143: - file_root: "planck_f143" - n_splits: 2 - freq_tag: 143 - exp_tag: "planck" - # planck_f217: - # file_root: "planck_f217" - # n_splits: 2 - # freq_tag: 217 - # exp_tag: "planck" - # planck_f353: - # file_root: "planck_f353" - # n_splits: 2 - # freq_tag: 353 - # exp_tag: "planck" - -#################### -# Masking metadata # -#################### -masks: - # Load nhits map from disk? Give absolute location here. - # If left blank, the mask handler will download the nominal nhits map. 
- input_nhits_path: - - # Copies of all masks are stored under these names inside mask_directory - analysis_mask: "analysis_mask.fits" - nhits_map: "nhits_map.fits" - binary_mask: "binary_mask.fits" - galactic_mask_root: "planck_galactic_mask" - point_source_mask: "point_source_mask.fits" - - include_in_mask: [] - gal_mask_mode: "gal070" - apod_radius: 10.0 - apod_radius_point_source: 4.0 - apod_type: "C1" - -#################################### -# Metadata related to the analysis # -#################################### -## General parameters -general_pars: - nside: 256 - lmin: 30 - lmax: 600 - deltal: 10 - binning_file: "binning.npz" - pure_B: True - -## Simulation related -sim_pars: - anisotropic_noise: False - null_e_modes: False - num_sims: 3 - cosmology: - cosmomc_theta: 0.0104085 - As: 2.1e-9 - ombh2: 0.02237 - omch2: 0.1200 - ns: 0.9649 - Alens: 1.0 - tau: 0.0544 - r: 0.01 - mock_nsrcs: 80 - mock_srcs_hole_radius: 40 - -## Filtering transfer function related parameters -tf_settings: - filtering_type: "m_filterer" # "m_filterer" or "toast" - m_cut: 30 # necessary only if `filtering_type` is set to "m_filterer" - toast: # necessary only if `filtering_type` is set to "toast" - schedule: "../outputs/schedules/schedule_sat.txt" - instrument: "SAT1" - band: "SAT_f090" - thinfp: 8 - group_size: 4 - - tf_est_num_sims: 4 - - power_law_pars_tf_est: - amp: 1.0 - delta_ell: 10 - power_law_index: 2. - - power_law_pars_tf_val: - amp: - TT: 10. - EE: 1. - BB: 0.05 - TE: 2.5 - TB: 0. - EB: 0. - delta_ell: 10 - power_law_index: 0.5 \ No newline at end of file diff --git a/paramfiles/paramfile_planck.yaml b/paramfiles/paramfile_planck.yaml new file mode 100644 index 0000000..f0cb347 --- /dev/null +++ b/paramfiles/paramfile_planck.yaml @@ -0,0 +1,126 @@ +## General output directory where all products are stored +output_directory: &out_dir /pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck + +######################### +# Map sets to correlate # +######################### +map_sets: + planck_f100: + map_dir: !path [*out_dir, maps] + beam_dir: !path [*out_dir, beams] + map_template: planck_f100_bundle{id_bundle}_{map|hits}.fits + beam_file: beam_planck_f100.dat + n_bundles: 2 # Number of bundles + freq_tag: 100 # Freq. tag (e.g. 
useful to coadd freqs)
+    exp_tag: Planck  # Experiment tag (useful to get noise-bias free cross-split spectra)
+    filtering_tag: mcut0
+  planck_f143:
+    map_dir: !path [*out_dir, maps]
+    beam_dir: !path [*out_dir, beams]
+    map_template: planck_f143_bundle{id_bundle}_{map|hits}.fits
+    beam_file: beam_planck_f143.dat
+    n_bundles: 2
+    freq_tag: 143
+    exp_tag: Planck
+    filtering_tag: mcut0
+
+####################
+# Masking metadata #
+####################
+masks:
+  analysis_mask: !path [*out_dir, masks/analysis_mask.fits]
+
+  # Use a global hits map instead of searching over bundle-specific hits
+  # (not recommended to use on real SAT data)
+  global_hits_file_overwrite: /global/cfs/projectdirs/sobs/www/users/so_bb/norm_nHits_SA_35FOV_ns512.fits
+
+  # Path to products (binary)
+  galactic_mask: null  # "/path/to/galactic_mask.fits"
+  point_source_catalog: null
+  point_source_mask: /pscratch/sd/k/kwolz/share/SOOPERCOOL/masks/npipe_mask_ecliptic_nside512.fits
+
+  external_mask: null
+
+  apod_radius: 10.0
+  apod_radius_point_source: 1.0
+  apod_type: "C2"
+
+####################################
+# Metadata related to the analysis #
+####################################
+## General parameters
+general_pars:
+  pix_type: hp
+  nside: 512
+  lmin: 30
+  lmax: 600
+  binning_file: !path [*out_dir, binning/binning_nside512_deltal10.npz]
+  pure_B: True
+  # Where the beam window is lower than beam_floor, set it to beam_floor
+  beam_floor: 1.e-5
+
+## Filtering-related parameters
+filtering:
+
+  ## Define filtering tags
+  tags_settings:
+
+    # This is a dummy filter that leaves power spectra unchanged
+    mcut0:
+      # Filtering parameters
+      filtering_type: "m_filterer"
+      m_cut: 0
+
+## Transfer-function-related metadata
+transfer_settings:
+  transfer_directory: !path [*out_dir, transfer_functions]
+
+  # For estimation
+  ## Number of sims for tf estimation
+  tf_est_num_sims: 10
+
+  ## Parameters of the PL sims used for TF estimation
+  power_law_pars_tf_est:
+    amp: 1.0
+    delta_ell: 10
+    power_law_index: 2.
+
+  ## Optional beams applied on PL sims
+  # If true, beams will be applied only on the validation simulations.
+  # By default (false), beams are applied to both the estimation and
+  # validation sims, to account for potential effects of the beam on the TF (e.g.
commutativity) + do_not_beam_est_sims: False + beams_list: ["planck_f100", "planck_f143"] + + ## Path to the sims for TF estimation + unfiltered_map_dir: + mcut0: !path [*out_dir, tf_est_sims] + unfiltered_map_template: + mcut0: "{pure_type}_power_law_tf_est_{id_sim:04d}_planck_f100.fits" + filtered_map_dir: + mcut0: !path [*out_dir, tf_est_sims] + filtered_map_template: + mcut0: "{pure_type}_power_law_tf_est_{id_sim:04d}_planck_f100.fits" + +# Covariance-related parameters +covariance: + ## Number of sims for covariance estimation + cov_num_sims: 100 + + ## Directories and file names of simulated noise maps + noise_map_sims_dir: + planck_f100: /pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck/noise_sims + planck_f143: /pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck/noise_sims + + noise_map_sims_template: + planck_f100: "{id_sim:04d}/{map_set}_bundle{id_bundle}_noise.fits" + planck_f143: "{id_sim:04d}/{map_set}_bundle{id_bundle}_noise.fits" + + ## Directories and file names of simulated signal alms + signal_alm_sims_dir: !path [*out_dir, signal_sims/sims] + signal_alm_sims_template: "{id_sim:04d}/alm_{freq_tag:03d}GHz_lmax1535_{id_sim:04d}.fits" + + ## Fits files with simulation input power spectra (in healpy format) + fiducial_cmb: "/pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck/signal_sims/cls/Cls_Planck2018_r0.fits" + fiducial_dust: "/pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck/signal_sims/cls/cl_dust_f{nu1:03d}_f{nu2:03d}.fits" + fiducial_synch: "/pscratch/sd/k/kwolz/share/SOOPERCOOL/tutorial_planck/signal_sims/cls/cl_synch_f{nu1:03d}_f{nu2:03d}.fits" diff --git a/paramfiles/paramfile_refactored.yaml b/paramfiles/paramfile_refactored.yaml index 31d4ce2..9c75452 100644 --- a/paramfiles/paramfile_refactored.yaml +++ b/paramfiles/paramfile_refactored.yaml @@ -1,12 +1,12 @@ -output_directory: &out_dir /pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/debugging/outputs_refactored_noisy +output_directory: &out_dir test_CAR_PR map_sets: sat_f093: - map_dir: "../debugging/data_refactored_noisy/maps" - beam_dir: "../debugging/data_refactored_noisy/beams" - map_template: "sat_f093_bundle{id_bundle}_{map|hits}.fits" - beam_file: "beam_sat_f093.dat" - n_bundles: 2 # Number of bundles + map_dir: "/home/laposta/Documents/development/soopercool_f2f/SOOPERCOOL/pipeline/outputs_satp3_CAR/bundled_maps" + beam_dir: "/home/laposta/Documents/development/cloned_repo/SOOPERCOOL/data_256/beams" + map_template: "coadd_f090_bundle0_{map|hits}.fits" + beam_file: "beam_cmb_sat1_f093.dat" + n_bundles: 4 # Number of bundles freq_tag: 93 # Freq. tag (e.g. 
useful to coadd freqs)
     exp_tag: "SAT"  # Experiment tag (useful to get noise-bias free cross-split spectra)
     filtering_tag: "mcut0"
@@ -16,16 +16,19 @@ map_sets:
 ####################
 masks:
   analysis_mask: !path [*out_dir, masks/analysis_mask.fits]
+  # When specifying this file, the mapset/bundle-specific hits files will
+  # be ignored and instead a global hits file will be used (for testing purposes)
+  global_hits_file_overwrite: /home/wolz/bbdev/SOOPERCOOL/outputs/masks/hits_1year_200dets_sat_beams.fits
 
   # Path to products (binary)
-  galactic_mask: null  # "/path/to/galactic_mask.fits"
+  galactic_mask: ~/Downloads/planck_gal_masks/planck_galmask_070_CAR_10arcmin.fits  # "/path/to/galactic_mask.fits"
   point_source_catalog: null
   point_source_mask: null  # "/path/to/point_source_mask.fits"
 
   external_mask: null
 
   apod_radius: 10.0
-  apod_radius_point_source: 4.0
+  apod_radius_point_source: 1.0
   apod_type: "C1"
 
 ####################################
@@ -33,11 +36,12 @@ masks:
 ####################################
 ## General parameters
 general_pars:
-  nside: 256
+  pix_type: car
+  nside: 512
   lmin: 30
   lmax: 600
-  binning_file: !path [*out_dir, binning/binning_nside256_deltal10.npz]
-  pure_B: True
+  binning_file: !path [*out_dir, binning/binning_nside256_deltal21.npz]
+  pure_B: False
   # Where the beam window is lower than beam_floor, set it to beam_floor
   beam_floor: 1.e-2
diff --git a/paramfiles/paramfile_wmap_planck.yaml b/paramfiles/paramfile_wmap_planck.yaml
deleted file mode 100644
index f8b7ca4..0000000
--- a/paramfiles/paramfile_wmap_planck.yaml
+++ /dev/null
@@ -1,201 +0,0 @@
-# Here is a sample .yaml file that can be used
-# to store the metadata necessary to run the
-# SO BB pipeline.
-
-# Define the directories
-## Let's start with directories of data products
-data_dirs:
-  root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/data_wmap_planck_PR3mask"
-  map_directory: "maps"
-  beam_directory: "beams"
-  bandpasses_directory: "bandpasses"
-  mock_directory: "mock_data"
-## Then directories in which we will store outputs
-output_dirs:
-  root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/outputs_wmap_planck_PR3mask"
-  mask_directory: "masks"
-  pre_process_directory: "pre_processing"
-  sims_directory: "sims"
-  cell_transfer_directory: "cells_transfer"
-  cell_data_directory: "cells_data"
-  cell_sims_directory: "cells_sims"
-  coupling_directory: "couplings"
-  covmat_directory: "covariances"
-  sacc_directory: "sacc_files"
-
-##################################
-# Metadata related to maps      #
-# ------------------------------ #
-#                                #
-# The structure of input splits  #
-# is the following:              #
-#                                #
-# tag:                           #
-#   file_root: ...               #
-#   n_splits: ...                #
-#   freq_tag: ...                #
-#   exp_tag: ...
# -################################## -map_sets: - wmap_f023: - file_root: "wmap_f023" - n_splits: 9 - freq_tag: 23 - exp_tag: "wmap" - filtering_tag: "none" - wmap_f033: - file_root: "wmap_f033" - n_splits: 9 - freq_tag: 33 - exp_tag: "wmap" - filtering_tag: "none" - planck_f030: - file_root: "planck_f030" - n_splits: 2 - freq_tag: 30 - exp_tag: "planck" - filtering_tag: "none" - planck_f100: - file_root: "planck_f100" - n_splits: 2 - freq_tag: 100 - exp_tag: "planck" - filtering_tag: "none" - planck_f143: - file_root: "planck_f143" - n_splits: 2 - freq_tag: 143 - exp_tag: "planck" - filtering_tag: "none" - planck_f217: - file_root: "planck_f217" - n_splits: 2 - freq_tag: 217 - exp_tag: "planck" - filtering_tag: "none" - planck_f353: - file_root: "planck_f353" - n_splits: 2 - freq_tag: 353 - exp_tag: "planck" - filtering_tag: "none" - -#################### -# Masking metadata # -#################### -masks: - # Load nhits map from disk? Give absolute location here. - # If left blank, the mask handler will download the nominal nhits map. - input_nhits_path: "/global/homes/k/kwolz/bbdev/SOOPERCOOL/data/planck_dr4_pol_mask.fits" - - # Copies of all masks are stored under these names inside mask_directory - analysis_mask: "analysis_mask.fits" - nhits_map: "nhits_map.fits" - binary_mask: "binary_mask.fits" - galactic_mask_root: "planck_galactic_mask" - point_source_mask: "point_source_mask.fits" - - include_in_mask: [] - gal_mask_mode: "gal070" - apod_radius: 10.0 - apod_radius_point_source: 4.0 - apod_type: "C1" - -#################################### -# Metadata related to the analysis # -#################################### -## General parameters -general_pars: - nside: 256 - lmin: 30 - lmax: 600 - deltal: 10 - binning_file: "binning.npz" - pure_B: False - # Where the beam window is lower than beam_floor, set it to beam_floor - beam_floor: 1.e-2 - -## Simulation related -sim_pars: - anisotropic_noise: False - null_e_modes: False - num_sims: 100 - ## Used for cosmo TF validation and cov sims - cosmology: - cosmomc_theta: 0.0104085 - As: 2.1e-9 - ombh2: 0.02237 - omch2: 0.1200 - ns: 0.9649 - Alens: 1.0 - tau: 0.0544 - r: 0.01 - noise: - survey_years: 5. - sensitivity_mode: "baseline" - one_over_f_mode: "optimistic" - - mock_nsrcs: 80 - mock_srcs_hole_radius: 40 - -## Filtering related parameters -filtering: - - slurm: False # Run TOAST filtering locally or with SLURM scheduller - # `slurm_autosubmit` only works if `slurm` is True. - # `slurm_autosubmit` set to True to auto-submit generated sbatch scripts. - # Set to False would generate scripts but not submitted, give you a chance to check generated scripts. - slurm_autosubmit: False - scripts_dir: "../sbatch" # directory of filtering scripts - - ## Define filtering tags - tags_settings: - - # If true, beams will be applied only on the validation simulations. By default (false), beam are applied to both the estimation and validation sims, - # to account for potential effect of the beam on the TF (e.g. commutativity) - do_not_beam_est_sims: False - - none: # placeholder for a no-filtering operation - filtering_type: "m_filterer" - m_cut: 0 - beam: null # beam null means no beam is applied to TF estimation steps - - # mcut30: - # filtering_type: "m_filterer" - # m_cut: 30 - # beam: null # beam null means no beam is applied to TF estimation steps - - # mcut15: - # filtering_type: "m_filterer" - # m_cut: 15 - # beam: null - - toast_SAT1_f090: - filtering_type: "toast" - template: "../paramfiles/run_toast.sh.j2" # TOAST template script. 
- config: "../paramfiles/defaults.toml" # TOAST toml config file - schedule: "../outputs/schedules/schedule_sat.txt" # TOAST schedule file - tf_instrument: "SAT1" # Instrument used for transfer function calculation - tf_band: "SAT_f090" # Band used for transfer function calculation - beam: null - - ## Number of sims for tf estimation and validation - tf_est_num_sims: 30 - - ## Parameters of the PL sims used for TF estimation - power_law_pars_tf_est: - amp: 1.0 - delta_ell: 10 - power_law_index: 2. - - ## Parameters of the PL sims used for TF validation - power_law_pars_tf_val: - amp: - TT: 10. - EE: 1. - BB: 0.05 - TE: 2.5 - TB: 0. - EB: 0. - delta_ell: 10 - power_law_index: 0.5 diff --git a/pipeline/bundling/bundle_atomic_maps.py b/pipeline/bundling/bundle_atomic_maps.py index e23a8e1..5a70c7e 100644 --- a/pipeline/bundling/bundle_atomic_maps.py +++ b/pipeline/bundling/bundle_atomic_maps.py @@ -1,5 +1,6 @@ import argparse from soopercool import BBmeta +from soopercool import map_utils as mu from pixell import enmap, enplot import numpy as np @@ -77,12 +78,12 @@ def coadd_maps(map_list, res=10): shape, wcs = car.geometry for f in map_list: - m = enmap.read_map(f) + m = mu.read_map(f, pix_type='car') car = enmap.insert(car, m, op=np.ndarray.__iadd__) - w = enmap.read_map(f.replace("wmap", "weights")) + w = mu.read_map(f.replace("wmap", "weights"), pix_type='car') w = np.moveaxis(w.diagonal(), -1, 0) wcar = enmap.insert(wcar, w, op=np.ndarray.__iadd__) - h = enmap.read_map(f.replace("wmap", "hits"))[0] + h = mu.read_map(f.replace("wmap", "hits"), pix_type='car')[0] hits = enmap.insert(hits, h, op=np.ndarray.__iadd__) wcar[wcar == 0] = np.inf return car / wcar, wcar, hits @@ -160,7 +161,7 @@ def main(args): keep[ftag] = [] for id_waf in range(7): for fname in fname_list[ftag, id_waf]: - m = enmap.read_map(fname) + m = mu.read_map(fname, pix_type='car') binary = enmap.insert(template_CAR.copy(), m[0]) binary[binary != 0] = 1 @@ -211,7 +212,7 @@ def main(args): coadded_maps = {} coadded_weights = {} coadded_hits = {} - unit_conv = 1e6 * 13.2 # pW to uK + unit_conv = 13.2 # pW to K for ftag in freq_tags: for id_bundle in range(n_bundles): meta.timer.start("coadding") @@ -246,11 +247,12 @@ def main(args): plot_hit ) file_name = f"{maps_dir}/TQU_CAR_coadd_{ftag}_bundle{id_bundle}_allwaf.fits" # noqa - enmap.write_map(file_name, coadded_maps[ftag, id_bundle]) - enmap.write_map(file_name.replace("TQU", "weights"), - coadded_weights[ftag, id_bundle]) - enmap.write_map(file_name.replace("TQU", "hits"), - coadded_hits[ftag, id_bundle]) + mu.write_map(file_name, coadded_maps[ftag, id_bundle], + pix_type='car') + mu.write_map(file_name.replace("TQU", "weights"), + coadded_weights[ftag, id_bundle], pix_type='car') + mu.write_map(file_name.replace("TQU", "hits"), + coadded_hits[ftag, id_bundle], pix_type='car') with open(f"{maps_dir}/atomic_map_list_{ftag}_bundle{id_bundle}.txt", "w") as f: # noqa for fname in to_coadd_full[ftag, id_bundle]: @@ -269,13 +271,15 @@ def main(args): range=[100000], colorbar=1, ticks=5)[0] enplot.write(f"{plot_dir}/coadd_{ftag}_bundle{id_bundle}_waf{id_waf}_hits.png", plot_hit) # noqa file_name = f"{maps_dir}/TQU_CAR_coadd_{ftag}_bundle{id_bundle}_waf{id_waf}.fits" # noqa - enmap.write_map(file_name, coadded_maps[ftag, id_bundle]) - enmap.write_map( + mu.write_map(file_name, coadded_maps[ftag, id_bundle], + pix_type='car') + mu.write_map( file_name.replace("TQU", "weights"), - coadded_weights[ftag, id_waf, id_bundle] - ) - enmap.write_map(file_name.replace("TQU", "hits"), - 
coadded_hits[ftag, id_waf, id_bundle])
+                coadded_weights[ftag, id_waf, id_bundle],
+                pix_type='car')
+            mu.write_map(file_name.replace("TQU", "hits"),
+                         coadded_hits[ftag, id_waf, id_bundle],
+                         pix_type='car')
 
         with open(f"{maps_dir}/atomic_map_list_{ftag}_waf{id_waf}_bundle{id_bundle}.txt", "w") as f:  # noqa
             for fname in to_coadd[ftag, id_waf][id_bundle]:
@@ -292,12 +296,11 @@ def main(args):
                 coadded_hits[ftag, id_bundle],
                 nside=256, extensive=True, method="spline"
             )
-            hp.write_map(f"{maps_dir}/coadd_{ftag}_bundle{id_bundle}_map.fits",
-                         TQU_hp, overwrite=True, dtype=np.float32)
-            hp.write_map(
+            mu.write_map(f"{maps_dir}/coadd_{ftag}_bundle{id_bundle}_map.fits",
+                         TQU_hp, dtype=np.float32, pix_type='hp')
+            mu.write_map(
                 f"{maps_dir}/coadd_{ftag}_bundle{id_bundle}_hits.fits",
-                hits_hp, overwrite=True, dtype=np.float32
-            )
+                hits_hp, dtype=np.float32, pix_type='hp')
 
             for id_waf in range(7):
                 TQU_hp = reproject.map2healpix(
                     coadded_hits[ftag, id_waf, id_bundle],
                     nside=256, extensive=True, method="spline"
                 )
-            hp.write_map(f"{maps_dir}/coadd_{ftag}_wafer{id_waf}_bundle{id_bundle}_map.fits", TQU_hp, overwrite=True, dtype=np.float32)  # noqa
-            hp.write_map(f"{maps_dir}/coadd_{ftag}_wafer{id_waf}_bundle{id_bundle}_hits.fits", hits_hp, overwrite=True, dtype=np.float32)  # noqa
+            mu.write_map(f"{maps_dir}/coadd_{ftag}_wafer{id_waf}_bundle{id_bundle}_map.fits",  # noqa
+                         TQU_hp, dtype=np.float32, pix_type='hp')
+            mu.write_map(f"{maps_dir}/coadd_{ftag}_wafer{id_waf}_bundle{id_bundle}_hits.fits",  # noqa
+                         hits_hp, dtype=np.float32, pix_type='hp')
 
 
 if __name__ == "__main__":
diff --git a/pipeline/bundling/coadder.py b/pipeline/bundling/coadder.py
new file mode 100644
index 0000000..3f4ca61
--- /dev/null
+++ b/pipeline/bundling/coadder.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+
+import time
+import numpy as np
+
+
+class SignFlip:
+    def __init__(self, state=None):
+        self.prng = np.random.RandomState(int(1e+6*time.time()) % 2**32)
+        if state:
+            self.prng.set_state(state)
+        self.state = self.prng.get_state()
+        self.seq = None
+
+    def gen_seq(self, obs_weights):
+        self.state = self.prng.get_state()
+
+        nums = len(obs_weights)
+        obs = range(nums)
+        obs_perm = self.prng.permutation(obs)
+
+        obs_weights_perm = np.zeros_like(obs_weights)
+        self.seq = np.zeros_like(obs_weights, dtype=np.bool_)
+
+        # Reorder the weights so that the cumulative sum below walks
+        # through the observations in permuted order
+        for i, ob in enumerate(obs_perm):
+            obs_weights_perm[i] = obs_weights[ob]
+
+        w = obs_weights_perm
+        wi = np.cumsum(w)
+        wi = wi/np.max(wi)
+
+        # no sign flip for the first half of the cumulative weight
+        noflip = np.where(wi < 0.5)[0].tolist()
+        # decide whether to flip the middle observation by coin toss
+        if len(w) > 1 and len(noflip) < 2 and self.prng.randint(0, 2):
+            noflip.append(max(noflip, default=-1) + 1)
+
+        for i in range(nums):
+            if i in noflip:
+                seq = False
+            else:
+                seq = True
+
+            self.seq[obs_perm[i]] = seq
+
+        return self.prng.get_state()
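A minimal usage sketch for the SignFlip class added above (the weights are made-up numbers; in the pipeline they would come from the per-atomic inverse-variance maps):

    import numpy as np
    from coadder import SignFlip

    obs_weights = np.array([1.2, 0.8, 1.0, 0.5])  # hypothetical atomic weights

    sf = SignFlip()
    state = sf.gen_seq(obs_weights)  # returns the RNG state, for reproducibility
    signs = sf.seq * 2 - 1           # boolean flip sequence -> +/-1 signs

    # Summing sign-flipped, inverse-variance-weighted atomics cancels the
    # signal and leaves a noise realization; this is how
    # generate_map_bundles.py (further below) uses the sequence.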
diff --git a/pipeline/bundling/coordinator.py b/pipeline/bundling/coordinator.py
new file mode 100644
index 0000000..ada817b
--- /dev/null
+++ b/pipeline/bundling/coordinator.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+
+import numpy as np
+import healpy as hp
+import h5py
+
+
+def read_hdf5_map(fname, to_nest=False):
+    """
+    Read a HEALPix map stored in an hdf5 file, returning it in RING
+    (default) or NESTED ordering.
+    """
+    f = h5py.File(fname, "r")
+    dset = f["map"]
+    header = dict(dset.attrs)
+
+    if header["ORDERING"] == "NESTED":
+        file_nested = True
+    elif header["ORDERING"] == "RING":
+        file_nested = False
+
+    _, npix = dset.shape
+
+    if file_nested and not to_nest:
+        mapdata = hp.reorder(dset[:], n2r=True)
+    elif not file_nested and to_nest:
+        mapdata = hp.reorder(dset[:], r2n=True)
+    else:
+        # Read the data into memory before the file is closed
+        mapdata = dset[:]
+
+    f.close()
+
+    return mapdata
+
+
+def write_hdf5_map(fname, nside, dict_maps, list_of_obsid,
+                   nest_or_ring='RING'):
+    """
+    Write a dictionary of HEALPix maps to an hdf5 file, storing nside,
+    ordering and observation IDs as attributes.
+    """
+    with h5py.File(fname, 'w') as f:
+        f.attrs['NSIDE'] = nside
+        f.attrs['ORDERING'] = nest_or_ring
+        f.attrs['OBS_ID'] = list_of_obsid
+        for k, v in dict_maps.items():
+            f.create_dataset(k, data=v)
+
+
+def gen_masks_of_given_atomic_map_list_for_bundles(nmaps, nbundles):
+    """
+    Makes a list (length nbundles) of boolean arrays (length nmaps)
+    corresponding to the atomic maps that have to be coadded to make up a
+    given bundle. This is done by distributing the atomic maps uniformly
+    into the bundles and assigning the remainder, one map each, to the
+    first bundles.
+
+    Parameters
+    ----------
+    nmaps: int
+        Number of atomic maps to distribute.
+    nbundles: int
+        Number of map bundles to be generated.
+
+    Returns
+    -------
+    boolean_mask_list: list of np.ndarray of bool
+        List of boolean arrays indicating the atomics to be coadded for
+        each bundle.
+    """
+
+    n_per_bundle = nmaps // nbundles
+    nremainder = nmaps % nbundles
+    boolean_mask_list = []
+
+    i_begin = 0
+    for idx in range(nbundles):
+        # The first `nremainder` bundles take one extra map each
+        if idx < nremainder:
+            _n_per_bundle = n_per_bundle + 1
+        else:
+            _n_per_bundle = n_per_bundle
+
+        i_end = i_begin + _n_per_bundle
+
+        _m = np.zeros(nmaps, dtype=np.bool_)
+        _m[i_begin:i_end] = True
+
+        boolean_mask_list.append(_m)
+        i_begin = i_end
+
+    return boolean_mask_list
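To make the bundle assignment concrete, here is what the boolean masks returned by gen_masks_of_given_atomic_map_list_for_bundles look like for a hypothetical five atomics split into two bundles:

    import numpy as np
    from coordinator import gen_masks_of_given_atomic_map_list_for_bundles

    masks = gen_masks_of_given_atomic_map_list_for_bundles(nmaps=5, nbundles=2)
    # masks[0] -> [ True,  True,  True, False, False]  (5 // 2 maps + 1 remainder)
    # masks[1] -> [False, False, False,  True,  True]
    atomics = np.array(["atm0", "atm1", "atm2", "atm3", "atm4"])
    print(atomics[masks[0]])  # the atomics coadded into bundle 0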
diff --git a/pipeline/bundling/generate_map_bundles.py b/pipeline/bundling/generate_map_bundles.py
new file mode 100644
index 0000000..daf4d8f
--- /dev/null
+++ b/pipeline/bundling/generate_map_bundles.py
@@ -0,0 +1,130 @@
+import os
+import healpy as hp
+import numpy as np
+import argparse
+
+# Yuji's code
+import coordinator
+import coadder
+
+
+def main(args):
+    """
+    Read list of inverse noise-weighted atomic maps and list of per-pixel
+    inverse noise covariance matrices, separate them into bundles, coadd
+    them, and write them to disk. Optionally apply a random sign flip to the
+    atomics within each bundle before coadding, to obtain per-bundle noise
+    realizations.
+
+    Inputs:
+
+    atomics_list: list of strings, containing the file paths of atomic
+                  maps.
+
+    invcov_list: list of strings, containing the file paths of per-pixel
+                 inverse noise covariance matrices corresponding to each
+                 atomic map.
+
+    nbundles: integer, number of bundles to be formed from atomics.
+
+    outdir: string, output directory.
+
+    prefix_outmap: string, prefix to files written to outdir.
+                   Default: None.
+
+    do_signflip: boolean, whether to apply the sign flip procedure.
+                 Default: False.
+
+    maps_are_weighted: boolean, whether atomics are inverse noise-variance
+                       weighted or not. Default: False.
+    """
+    os.makedirs(args.outdir, exist_ok=True)
+    atomic_maps_list = np.load(args.atomics_list_fpath)
+    natomics = len(atomic_maps_list["wmap"])
+
+    # Load ivar weights and weighted maps
+    # *****
+    # FIXME: This whole part needs to be adapted and generalized to CAR or
+    # HEALPix.
+    npix = len(atomic_maps_list[0][0, :])
+
+    # invcov-weighted input maps
+    weighted_maps = np.zeros(shape=(natomics, 3, npix), dtype=np.float32)
+    # inverse noise covariance maps
+    weights = np.zeros(shape=(natomics, 3, npix), dtype=np.float32)
+    # hits maps
+    hits_maps = np.zeros(shape=(natomics, npix), dtype=np.float32)
+
+    # Random division into bundles (using Yuji's code)
+    bundle_mask = coordinator.gen_masks_of_given_atomic_map_list_for_bundles(
+        natomics, args.nbundles
+    )
+
+    # Loop over bundle-wise atomics
+    for id_bundle in range(args.nbundles):
+        print("Bundle #", id_bundle + 1)
+        for i in np.arange(natomics)[bundle_mask[id_bundle]]:
+            print(i, end=',')
+            fname_wmap = atomic_maps_list["wmap"][i]
+
+            # FIXME: Load correctly. At the moment this is a placeholder.
+            weighted_maps[i] = coordinator.read_hdf5_map(fname_wmap)
+            fname_weights = atomic_maps_list["weights"][i]
+            weights[i] = coordinator.read_hdf5_map(fname_weights)[(0, 3, 5), :]
+
+        # Coadd the ivar weights and hits maps
+        coadd_weight_map = np.sum(weights, axis=0)
+        mask = np.mean(coadd_weight_map[1:], axis=0) > 0.
+        nside = hp.npix2nside(npix)
+        coadd_hits_map = np.sum(hits_maps, axis=0)[mask]
+
+        # Optionally apply signflip using Yuji's code
+        if args.do_signflip:
+            fname = 'sf_map.hdf5'
+
+            sf = coadder.SignFlip()
+            obs_weights = np.sum(weights[:, 1:, :]*0.5, axis=(1, 2))
+
+            sf.gen_seq(obs_weights)
+            signs = sf.seq * 2 - 1
+        else:
+            fname = 'coadd_map.hdf5'
+            signs = np.ones(natomics)
+
+        # Divide by weights to get back the unweighted map solution
+        coadd_solved_map = np.divide(
+            np.sum(signs[:, np.newaxis, np.newaxis]*weighted_maps, axis=0),
+            coadd_weight_map, out=np.zeros_like(coadd_weight_map),
+            where=mask
+        )
+        # *****
+
+        # Save maps to disk
+        fname = os.path.join(args.outdir, fname)
+        dict_maps = {"weight_map": coadd_weight_map,
+                     "solved_map": coadd_solved_map,
+                     "hits_map": coadd_hits_map}
+
+        # This is just a proxy to the obs ID, e.g.
+        # "atomic_1709852088_ws2_f090_full"
+        list_of_obsid = ["_".join(atm.split('/')[-1].split("_")[:-1])
+                         for atm in atomic_maps_list["wmap"][bundle_mask[id_bundle]]]  # noqa
+
+        # TODO: Adapt SOOPERCOOL to handle hdf5 maps as inputs.
+        coordinator.write_hdf5_map(fname, nside, dict_maps, list_of_obsid)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--atomics_list_fpath",
+                        help="Path to npz file with path to atomic maps.")
+    parser.add_argument("--outdir",
+                        help="Output directory for bundle maps list.")
+    parser.add_argument("--nbundles", type=int,
+                        help="Number of bundles to make from atomic maps.")
+    parser.add_argument("--do_signflip", action="store_true",
+                        help="Whether to make sign-flip noise realizations"
+                             "from the atomic maps in each bundle.")
+
+    args = parser.parse_args()
+    main(args)
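The npz file exchanged between get_atomics_list.py (next) and generate_map_bundles.py (above) holds three parallel lists of file paths under the keys "wmap", "weights" and "hits". A hand-built stand-in for testing could look like this (the path is a placeholder following the naming convention documented in get_atomics_list.py):

    import numpy as np

    wmap = ["/maps/17107/atomic_1710705196_ws0_f090_full_wmap.fits"]
    np.savez("atomic_maps_list.npz",
             wmap=wmap,
             weights=[p.replace("wmap", "weights") for p in wmap],
             hits=[p.replace("wmap", "hits") for p in wmap])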
diff --git a/pipeline/bundling/get_atomics_list.py b/pipeline/bundling/get_atomics_list.py
new file mode 100644
index 0000000..6a48760
--- /dev/null
+++ b/pipeline/bundling/get_atomics_list.py
@@ -0,0 +1,107 @@
+import argparse
+import sqlite3
+import numpy as np
+import os
+
+
+def get_atomic_maps_list(map_dir, queries_list, map_type="wmap",
+                         db_fpath=None, verbose=False):
+    """
+    Outputs list of atomic maps
+    (e.g. "{map_dir}/17107/atomic_1710705196_ws0_f090_full_{map_type}.fits")
+    based on map_dir (base directory for atomic maps) and queries_list.
+
+    Parameters
+    ----------
+    map_dir: str
+        Path to directory of atomic maps and atomics database.
+    queries_list: list of str
+        List with SQL query strings, e.g.
+        "freq_channel = 'f090'", "elevation >= '50'", etc.
+    map_type: str
+        Map type to output, e.g. 'wmap' for weighted map, 'weights' for
+        inverse variance weights map, 'hits' for hits map.
+        Default: 'wmap'.
+    db_fpath: str
+        Full path to file name of atomic maps database for map_dir.
+        If not provided, default to "{map_dir}/atomic_maps.db".
+        Default: None
+    verbose: boolean
+        Verbose output? Default: False
+
+    Returns
+    -------
+    atomic_maps_list: list of str
+        List with paths to all atomic maps requested.
+    """
+    if db_fpath is None:
+        db_fpath = f"{map_dir}/atomic_maps.db"
+    if verbose:
+        print(f"  Querying {db_fpath}")
+    assert map_type in ["wmap", "weights", "hits"]
+
+    query_target = "prefix_path"
+    table_name = "atomic"
+    where_statement = " AND ".join(queries_list)
+
+    conn = sqlite3.connect(db_fpath)
+    cursor = conn.cursor()
+
+    # Print columns
+    data = cursor.execute(f"SELECT * FROM {table_name};")
+    cols = [col[0] for col in data.description]
+    if verbose:
+        print(f"Columns of {table_name}:\n  ", "  ".join(cols))
+
+    # FIXME: Give warning if query key is not present in the database.
+
+    # Read data
+    cursor.execute(f"select {query_target} from {table_name} where {where_statement};")  # noqa
+    result = cursor.fetchall()
+    if verbose:
+        print(f"  Found {len(result)} matching entries.")
+    atomic_maps_list = []
+
+    for r in result:
+        atomic_maps_list.append(
+            f"{map_dir}/{'/'.join(r[0].split('/')[-2:])}_{map_type}.fits"
+        )
+    conn.commit()
+    conn.close()
+
+    return atomic_maps_list
+
+
+def main(args):
+    """
+    Get list of NERSC filepaths of atomic maps corresponding to an SQL query
+    to the associated atomic maps database.
+    """
+    print(f"Reading atomics at {args.map_dir}")
+    os.makedirs(args.outdir, exist_ok=True)
+    atomic_maps_list = {}
+    atomic_maps_list["wmap"] = get_atomic_maps_list(
+        args.map_dir, args.queries_list, map_type="wmap", verbose=args.verbose
+    )
+
+    for typ in ["weights", "hits"]:
+        atomic_maps_list[typ] = [a.replace("wmap", typ)
+                                 for a in atomic_maps_list["wmap"]]
+    natomics = len(atomic_maps_list["wmap"])
+    np.savez(f"{args.outdir}/atomic_maps_list.npz", **atomic_maps_list)
+    print(f"Stored list of {natomics} atomics "
+          f"under {args.outdir}/atomic_maps_list.npz")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--map_dir",
+                        help="Atomic maps directory.")
+    parser.add_argument("--queries_list", nargs="+", default=[],
+                        help="String with SQL queries of form "
+                             "\"freq_channel = 'f090'\".")
+    parser.add_argument("--verbose", action="store_true",
+                        help="Verbose output?")
+    parser.add_argument("--outdir",
+                        help="Output directory for atomic maps list.")
+
+    args = parser.parse_args()
+    main(args)
diff --git a/pipeline/bundling/run_bundling.sh b/pipeline/bundling/run_bundling.sh
new file mode 100644
index 0000000..803989b
--- /dev/null
+++ b/pipeline/bundling/run_bundling.sh
@@ -0,0 +1,9 @@
+python get_atomics_list.py \
+    --map_dir /pscratch/sd/s/susannaz/shared/SO_ISO/satp3_maps/cmb_map_satp3_8May2024 \
+    --queries_list "freq_channel = 'f090'" "elevation >= '50'" "ctime = '1710705196'" \
+    --outdir /pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/bundle_signflip/test_20241016
+
+# This part of the code is not yet finished.
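+# Once finished, the call below will also need the atomics list produced
+# by get_atomics_list.py above, e.g.
+# --atomics_list_fpath /pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/bundle_signflip/test_20241016/atomic_maps_list.npz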
+# python generate_map_bundles.py \ +# --nbundles 4 \ +# --outdir /pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/bundle_signflip/test_20241016 diff --git a/pipeline/compute_pseudo_cells.py b/pipeline/compute_pseudo_cells.py index 6789af9..46fc6a9 100644 --- a/pipeline/compute_pseudo_cells.py +++ b/pipeline/compute_pseudo_cells.py @@ -20,7 +20,8 @@ def main(args): BBmeta.make_dir(cells_dir) - mask = mu.read_map(meta.masks["analysis_mask"], ncomp=1) + mask = mu.read_map(meta.masks["analysis_mask"], + pix_type=meta.pix_type) binning = np.load(meta.binning_file) nmt_bins = nmt.NmtBin.from_edges(binning["bin_low"], @@ -65,8 +66,10 @@ def main(args): option = type_options.replace("{", "").replace("}", "").split("|")[0] map_file = map_file.replace(type_options, option) - m = mu.read_map(f"{map_dir}/{map_file}", ncomp=3) + m = mu.read_map(f"{map_dir}/{map_file}", pix_type=meta.pix_type, + fields_hp=[0, 1, 2], + convert_K_to_muK=True) if do_plots: for i, f in enumerate(["T", "Q", "U"]): hp.mollview(m[i], @@ -78,8 +81,10 @@ def main(args): f"bundle{id_bundle}_{f}.png") plt.close() - field_spin0 = nmt.NmtField(mask, m[:1]) - field_spin2 = nmt.NmtField(mask, m[1:], purify_b=meta.pure_B) + wcs = m.wcs + + field_spin0 = nmt.NmtField(mask, m[:1], wcs=wcs) + field_spin2 = nmt.NmtField(mask, m[1:], wcs=wcs, purify_b=meta.pure_B) fields[map_set, id_bundle] = { "spin0": field_spin0, diff --git a/pipeline/compute_sims_pseudo_cells.py b/pipeline/compute_sims_pseudo_cells.py index a293a92..2bdf57d 100644 --- a/pipeline/compute_sims_pseudo_cells.py +++ b/pipeline/compute_sims_pseudo_cells.py @@ -21,7 +21,8 @@ def main(args): BBmeta.make_dir(cells_dir) - mask = mu.read_map(meta.masks["analysis_mask"], ncomp=1) + mask = mu.read_map(meta.masks["analysis_mask"], + pix_type=meta.pix_type) binning = np.load(meta.binning_file) nmt_bins = nmt.NmtBin.from_edges(binning["bin_low"], @@ -44,7 +45,8 @@ def main(args): map_set, id_bundle = map_name.split("__") map_fname = f"{base_dir}/cov_sims_{map_set}_bundle{id_bundle}.fits" - m = mu.read_map(map_fname, ncomp=3) + m = mu.read_map(map_fname, field=[0, 1, 2], + pix_type=meta.pix_type, convert_K_to_muK=True) field_spin0 = nmt.NmtField(mask, m[:1]) field_spin2 = nmt.NmtField(mask, m[1:], purify_b=meta.pure_B) diff --git a/pipeline/filtering/filter_sotodlib.py b/pipeline/filtering/filter_sotodlib.py index aaa6c9d..2231f6e 100644 --- a/pipeline/filtering/filter_sotodlib.py +++ b/pipeline/filtering/filter_sotodlib.py @@ -75,7 +75,7 @@ def main(args): out_fname = args.map_template.format(sim_id=sim_id).replace(".fits", "_filtered.fits") # noqa out_file = f"{fsims_dir}/{out_fname}" - enmap.write_map(out_file, fsim, dtype=np.float32, overwrite=True) + mu.write_map(out_file, fsim, dtype=np.float32, pix_type='car') b = """ diff --git a/pipeline/generate_simulations.py b/pipeline/generate_simulations.py index 66d55d7..e9f1496 100644 --- a/pipeline/generate_simulations.py +++ b/pipeline/generate_simulations.py @@ -1,7 +1,7 @@ import argparse from soopercool import BBmeta from soopercool import mpi_utils as mpi -import healpy as hp +from soopercool import map_utils as mu import numpy as np import matplotlib.pyplot as plt @@ -26,7 +26,8 @@ def main(args): noise_map_template = meta.covariance["noise_sims_template"] signal_map_template = meta.covariance["signal_sims_template"] - binary = hp.read_map(f"{masks_dir}/binary_mask.fits") + binary = mu.read_map(f"{masks_dir}/binary_mask.fits", + pix_type=meta.pix_type) mpi.init(True) @@ -39,23 +40,25 @@ def main(args): print(f"# {id_sim} | {ms}") fname = 
signal_map_template.format(id_sim=id_sim, map_set=ms) - cmb = hp.read_map(f"{signal_map_dir}/{fname}", field=[0, 1, 2]) + cmb = mu.read_map(f"{signal_map_dir}/{fname}", field=[0, 1, 2], + pix_type=meta.pix_type) for id_bundle in range(meta.n_bundles_from_map_set(ms)): fname = noise_map_template.format(id_sim=id_sim, map_set=ms, id_bundle=id_bundle) - noise = hp.read_map( - f"{noise_map_dir}/{fname}", field=[0, 1, 2] - ) + noise = mu.read_map(f"{noise_map_dir}/{fname}", + field=[0, 1, 2], + pix_type=meta.pix_type) split_map = cmb + noise map_name = f"cov_sims_{ms}_bundle{id_bundle}.fits" - hp.write_map(f"{base_dir}/{map_name}", split_map*binary, - overwrite=True, + mu.write_map(f"{base_dir}/{map_name}", split_map*binary, dtype=np.float32) if do_plots: + import healpy as hp + for i, f in enumerate("TQU"): hp.mollview(split_map[i]*binary, cmap="RdYlBu_r", diff --git a/pipeline/get_analysis_mask.py b/pipeline/get_analysis_mask.py index 0f34053..2a5e580 100644 --- a/pipeline/get_analysis_mask.py +++ b/pipeline/get_analysis_mask.py @@ -4,7 +4,6 @@ from soopercool import map_utils as mu from soopercool import utils as su import numpy as np -import healpy as hp def main(args): @@ -24,71 +23,119 @@ def main(args): masks_settings = meta.masks - # First loop over the (map_set, id_bundles) - # pairs to define a common binary mask - hit_maps = [] - for map_set in meta.map_sets_list: - n_bundles = meta.n_bundles_from_map_set(map_set) - for id_bundle in range(n_bundles): - - map_dir = meta.map_dir_from_map_set(map_set) - map_template = meta.map_template_from_map_set(map_set) - - map_file = map_template.replace( - "{id_bundle}", - str(id_bundle) - ) - type_options = [ - f for f in re.findall(r"\{.*?\}", map_template) - if "|" in f - ] - if not type_options: - raise ValueError("The map directory must contain both maps " - "and hits files, indicated by a " - "corresponding suffix.") - else: - # Select the hitmap - option = type_options[0].replace("{", "") - option = option.replace("}", "").split("|")[1] - - map_file = map_file.replace( - type_options[0], - option + # If a global hits file is indicated in the paramter file, use it. + if masks_settings["global_hits_file_overwrite"] is not None: + sum_hits = mu.read_map( + masks_settings["global_hits_file_overwrite"], + pix_type=meta.pix_type + ) + # Create binary + binary = sum_hits.copy() + binary[:] = 1 + binary[sum_hits == 0] = 0 + else: + # Loop over the (map_set, id_bundles) + # pairs to define a common binary mask + hit_maps = [] + for map_set in meta.map_sets_list: + n_bundles = meta.n_bundles_from_map_set(map_set) + for id_bundle in range(n_bundles): + + map_dir = meta.map_dir_from_map_set(map_set) + map_template = meta.map_template_from_map_set(map_set) + map_file = map_template.replace( + "{id_bundle}", + str(id_bundle) ) - - print(f"Reading hitmap for {map_set} - bundle {id_bundle}") - if verbose: - print(f" file_name: {map_dir}/{map_file}") - - hits = mu.read_map(f"{map_dir}/{map_file}", ncomp=1) - hit_maps.append(hits) - - # Create binary and normalized hitmap - binary = np.ones_like(hit_maps[0]) - sum_hits = np.zeros_like(hit_maps[0]) - for hit_map in hit_maps: - binary[hit_map == 0] = 0 - sum_hits += hit_map - sum_hits[binary == 0] = 0 + type_options = [ + f for f in re.findall(r"\{.*?\}", map_template) + if "|" in f + ] + if not type_options: + raise ValueError( + "The map directory must contain both maps " + "and hits files, indicated by a " + "corresponding suffix." 
+                    )
+                else:
+                    # Select the hitmap
+                    option = type_options[0].replace("{", "")
+                    option = option.replace("}", "").split("|")[1]
+
+                map_file = map_file.replace(
+                    type_options[0],
+                    option
+                )
+
+                print(f"Reading hitmap for {map_set} - bundle {id_bundle}")
+                if verbose:
+                    print(f"    file_name: {map_dir}/{map_file}")
+
+                hits = mu.read_map(
+                    f"{map_dir}/{map_file}",
+                    pix_type=meta.pix_type
+                )
+                hit_maps.append(hits)
+
+        # Create binary and normalized hitmap
+        binary = hit_maps[0].copy()
+        sum_hits = hit_maps[0].copy()
+        binary[:] = 1
+        sum_hits[:] = 0
+        for hit_map in hit_maps:
+            binary[hit_map == 0] = 0
+            sum_hits += hit_map
+        sum_hits[binary == 0] = 0
 
     # Normalize and smooth hitmaps
-    sum_hits = hp.smoothing(sum_hits, fwhm=np.deg2rad(1.))
+    sum_hits = mu.smooth_map(sum_hits, fwhm_deg=1, pix_type=meta.pix_type)
     sum_hits /= np.amax(sum_hits)
 
     # Save products
-    mu.write_map(f"{masks_dir}/binary_mask.fits",
-                 binary, dtype=np.int32)
-    mu.write_map(f"{masks_dir}/normalized_hits.fits",
-                 sum_hits, dtype=np.float32)
+    mu.write_map(
+        f"{masks_dir}/binary_mask.fits",
+        binary,
+        dtype=np.int32,
+        pix_type=meta.pix_type
+    )
+    mu.write_map(
+        f"{masks_dir}/normalized_hits.fits",
+        sum_hits,
+        dtype=np.float32,
+        pix_type=meta.pix_type
+    )
 
     if do_plots:
-        mu.plot_map(binary,
-                    title="Binary mask",
-                    file_name=f"{plot_dir}/binary_mask")
+        mu.plot_map(
+            binary,
+            title="Binary mask",
+            file_name=f"{plot_dir}/binary_mask",
+            pix_type=meta.pix_type,
+            lims=[-binary.max(), binary.max()]
+        )
 
-        mu.plot_map(sum_hits,
-                    title="Normalized hitcount",
-                    file_name=f"{plot_dir}/normalized_hits")
+        mu.plot_map(
+            sum_hits,
+            title="Normalized hits",
+            file_name=f"{plot_dir}/normalized_hits",
+            pix_type=meta.pix_type,
+            lims=[-1, 1]
+        )
 
     analysis_mask = binary.copy()
@@ -96,75 +143,98 @@ def main(args):
         print("Reading galactic mask ...")
         if verbose:
             print(f"    file_name: {masks_settings['galactic_mask']}")
-        gal_mask = mu.read_map(masks_settings["galactic_mask"], ncomp=1)
+        gal_mask = mu.read_map(masks_settings["galactic_mask"],
+                               pix_type=meta.pix_type,
+                               geometry=analysis_mask.geometry)
 
         if do_plots:
-            mu.plot_map(gal_mask,
-                        title="Galactic mask",
-                        file_name=f"{plot_dir}/galactic_mask")
+            mu.plot_map(
+                gal_mask,
+                title="Galactic mask",
+                file_name=f"{plot_dir}/galactic_mask",
+                pix_type=meta.pix_type,
+                lims=[-1, 1]
+            )
         analysis_mask *= gal_mask
 
     if masks_settings["external_mask"] is not None:
         print("Reading external mask ...")
         if verbose:
             print(f"    file_name: {masks_settings['external_mask']}")
-        ext_mask = mu.read_map(masks_settings["external_mask"], ncomp=1)
+        ext_mask = mu.read_map(masks_settings["external_mask"],
+                               pix_type=meta.pix_type,
+                               geometry=analysis_mask.geometry)
 
         if do_plots:
             mu.plot_map(
                 ext_mask,
                 title="External mask",
-                file_name=f"{plot_dir}/external_mask")
+                file_name=f"{plot_dir}/external_mask",
+                pix_type=meta.pix_type,
+                lims=[-1, 1]
+            )
         analysis_mask *= ext_mask
 
-    import pymaster as nmt
-    analysis_mask = nmt.mask_apodization(
+    analysis_mask = mu.apodize_mask(
         analysis_mask,
apod_radius_deg=masks_settings["apod_radius"], + apod_type=masks_settings["apod_type"], + pix_type=meta.pix_type ) if masks_settings["point_source_mask"] is not None: print("Reading point source mask ...") if verbose: print(f" file_name: {masks_settings['point_source_mask']}") - ps_mask = mu.read_map(masks_settings["point_source_mask"], ncomp=1) - ps_mask = nmt.mask_apodization( + ps_mask = mu.read_map(masks_settings["point_source_mask"], + pix_type=meta.pix_type, + geometry=analysis_mask.geometry) + ps_mask = mu.apodize_mask( ps_mask, - masks_settings["apod_radius_point_source"], - apotype=masks_settings["apod_type"] + apod_radius_deg=masks_settings["apod_radius_point_source"], + apod_type=masks_settings["apod_type"], + pix_type=meta.pix_type ) if do_plots: mu.plot_map( ps_mask, title="Point source mask", - file_name=f"{plot_dir}/point_source_mask") + file_name=f"{plot_dir}/point_source_mask", + pix_type=meta.pix_type, + lims=[-1, 1] + ) analysis_mask *= ps_mask # Weight with hitmap analysis_mask *= sum_hits - mu.write_map(f"{masks_dir}/analysis_mask.fits", - analysis_mask, dtype=np.float32) + mu.write_map( + f"{masks_dir}/analysis_mask.fits", + analysis_mask, + pix_type=meta.pix_type + ) if do_plots: - mu.plot_map(analysis_mask, title="Analysis mask", - file_name=f"{plot_dir}/analysis_mask") + mu.plot_map( + analysis_mask, + title="Analysis mask", + file_name=f"{plot_dir}/analysis_mask", + pix_type=meta.pix_type, + lims=[-1, 1] + ) # Compute and plot spin derivatives first, second = su.get_spin_derivatives(analysis_mask) if do_plots: - mu.plot_map(first, title="First spin derivative", - file_name=f"{plot_dir}/first_spin_derivative") - mu.plot_map(second, title="Second spin derivative", - file_name=f"{plot_dir}/second_spin_derivative") - - import matplotlib.pyplot as plt - plt.figure() - plt.plot(first) - plt.show() - plt.figure() - plt.plot(second) - plt.show() + mu.plot_map( + first, + title="First spin derivative", + file_name=f"{plot_dir}/first_spin_derivative" + ) + mu.plot_map( + second, + title="Second spin derivative", + file_name=f"{plot_dir}/second_spin_derivative" + ) if args.verbose: print("---------------------------------------------------------") diff --git a/pipeline/get_mode_coupling.py b/pipeline/get_mode_coupling.py index b6ae7c8..b0be433 100644 --- a/pipeline/get_mode_coupling.py +++ b/pipeline/get_mode_coupling.py @@ -3,6 +3,7 @@ from soopercool import map_utils as mu import pymaster as nmt import numpy as np +import soopercool.utils as su def main(args): @@ -21,13 +22,31 @@ def main(args): if do_plots: BBmeta.make_dir(plot_dir) - nl = 3 * meta.nside nspec = 7 - mask = mu.read_map(meta.masks["analysis_mask"], ncomp=1) - - field_spin0 = nmt.NmtField(mask, None, spin=0) - field_spin2 = nmt.NmtField(mask, None, spin=2, purify_b=meta.pure_B) + mask = mu.read_map(meta.masks["analysis_mask"], + pix_type=meta.pix_type) + lmax = mu.lmax_from_map(mask, pix_type=meta.pix_type) + nl = lmax + 1 + + if meta.pix_type == "car": + _, wcs = mask.geometry + else: + wcs = None + + field_spin0 = nmt.NmtField( + mask, + None, + wcs=wcs, + spin=0 + ) + field_spin2 = nmt.NmtField( + mask, + None, + wcs=wcs, + spin=2, + purify_b=meta.pure_B + ) binning = np.load(meta.binning_file) nmt_bins = nmt.NmtBin.from_edges(binning["bin_low"], @@ -52,8 +71,11 @@ def main(args): beam_dir = meta.beam_dir_from_map_set(map_set) beam_file = meta.beam_file_from_map_set(map_set) - l, bl = np.loadtxt(f"{beam_dir}/{beam_file}", unpack=True) - beams[map_set] = bl[:nl] + _, bl = su.read_beam_from_file( + 
f"{beam_dir}/{beam_file}", + lmax=lmax + ) + beams[map_set] = bl # Beam-correct the mode coupling matrix beamed_mcm = {} diff --git a/pipeline/misc/get_binning.py b/pipeline/misc/get_binning.py index be6c233..d0190b1 100644 --- a/pipeline/misc/get_binning.py +++ b/pipeline/misc/get_binning.py @@ -2,6 +2,7 @@ import numpy as np from soopercool.utils import create_binning import argparse +from soopercool import map_utils as mu def main(args): @@ -13,18 +14,28 @@ def main(args): binning_dir = f"{out_dir}/binning" BBmeta.make_dir(binning_dir) - bin_low, bin_high, bin_center = create_binning(meta.nside, + mask = mu.read_map(meta.masks["analysis_mask"], pix_type=meta.pix_type) + lmax = mu.lmax_from_map(mask, pix_type=meta.pix_type) + print(lmax) + + bin_low, bin_high, bin_center = create_binning(lmax, args.deltal) print(bin_low, bin_high, bin_center) - file_name = f"binning_nside{meta.nside}_deltal{args.deltal}.npz" + file_name = f"binning_{meta.pix_type}_lmax{lmax}_deltal{args.deltal}" - bin_low2, bin_high2, bin_center2 = create_binning(meta.nside, + bin_low2, bin_high2, bin_center2 = create_binning(lmax, args.deltal, end_first_bin=30) print(bin_low2, bin_high2, bin_center2) np.savez( - f"{binning_dir}/{file_name}", + f"{binning_dir}/{file_name}.npz", + bin_low=bin_low, + bin_high=bin_high, + bin_center=bin_center + ) + np.savez( + f"{binning_dir}/{file_name}_large_first_bin.npz", bin_low=bin_low2, bin_high=bin_high2, bin_center=bin_center2 diff --git a/pipeline/misc/get_sat_beams.py b/pipeline/misc/get_sat_beams.py index 6859a53..69acddf 100644 --- a/pipeline/misc/get_sat_beams.py +++ b/pipeline/misc/get_sat_beams.py @@ -2,7 +2,6 @@ from soopercool import BBmeta import soopercool.SO_Noise_Calculator_Public_v3_1_2 as noise_calc import numpy as np -import os def beam_gaussian(ll, fwhm_amin): @@ -46,24 +45,21 @@ def main(args): meta = BBmeta(args.globals) verbose = args.verbose - lmax_sim = 3*meta.nside - 1 + lmax_sim = 3000 for ms in meta.map_sets_list: freq_tag = meta.freq_tag_from_map_set(ms) - beam_dir = meta.beam_dir_from_map_set(ms) + out_dir = meta.output_directory + beam_dir = f"{out_dir}/gaussian_beams" BBmeta.make_dir(beam_dir) fname = f"{beam_dir}/beam_{ms}.dat" - if not os.path.exists(fname): - if verbose: - print(f"Written to {fname}.") - l, bl = get_sat_beam(freq_tag, lmax_sim) - np.savetxt(fname, np.transpose([l, bl])) - else: - if verbose: - print(f"beam_{ms} exists, do nothing.") + if verbose: + print(f"Written to {fname}.") + l, bl = get_sat_beam(freq_tag, lmax_sim) + np.savetxt(fname, np.transpose([l, bl])) if __name__ == "__main__": diff --git a/pipeline/pre_processer_ext.py b/pipeline/pre_processer_ext.py index 81c7c0f..d84df5e 100644 --- a/pipeline/pre_processer_ext.py +++ b/pipeline/pre_processer_ext.py @@ -1,5 +1,6 @@ import argparse from soopercool import BBmeta, utils +from soopercool import map_utils as mu import numpy as np import os import healpy as hp @@ -40,48 +41,47 @@ def pre_processer(args): meta = BBmeta(args.globals) - data_dir = "../data" - outputs_dir = "../outputs" - sims_dir = meta.sims_directory - map_dir = meta.map_directory - map_plots_dir = f"{outputs_dir}/plots/maps" - beam_dir = meta.beam_directory - prep_dir = meta.pre_process_directory - alms_dir = f"{prep_dir}/external_data/alms" - plots_dir = f"{prep_dir}/external_data/plots" - ext_maps_dir = f"{prep_dir}/external_data/maps" - mask_dir = meta.mask_directory - directories = [data_dir, outputs_dir, sims_dir, - map_dir, map_plots_dir, beam_dir, - prep_dir, alms_dir, ext_maps_dir, - plots_dir, 
mask_dir] + output_dir = meta.output_directory + sims_dir = meta.covariance["signal_alm_sims_dir"] + + directories = [output_dir, sims_dir] for dirs in directories: os.makedirs(dirs, exist_ok=True) - freqs = [] - binary_mask = meta.read_mask("binary") + if meta.pix_type == "car": + raise NotImplementedError("Can only accept healpix for now.") + + binary_file = f"{output_dir}/masks/binary_mask.fits" + if not os.path.isfile(binary_file): + raise ValueError(f"Binary mask not found at {output_dir}/masks. " + "Have you run the mask maker?") + + binary_mask = mu.read_map(f"{output_dir}/masks/binary_mask.fits") nside_out = meta.nside lmax_out = 3*nside_out - 1 ells_beam = np.arange(3*nside_out + 1) if args.planck: npipe_dir = "/global/cfs/cdirs/cmb/data/planck2020/npipe" - splits = ['A', 'B'] - splits_dict = {'A': 0, 'B': 1} - to_muk = 1e6 + bundles = ['A', 'B'] + bundles_dict = {'A': 0, 'B': 1} + convert_to_K = 1 print("Beams") - beam_windows_dir = f"{beam_dir}/simulated_maps/npipe_aux/beam_window_functions" # noqa + bdir = meta.beam_dir_from_map_set(meta.map_sets_list[0]) + os.makedirs(bdir, exist_ok=True) + beam_windows_dir = f"{bdir}/simulated_maps/npipe_aux/beam_window_functions" # noqa + # Download Planck NPIPE RIMO from URL and save temporarily url_pref = "https://portal.nersc.gov/cfs/cmb/planck2020/misc" url = f"{url_pref}/PLANCK_RIMO_TF_R4.00.tar.gz" - fname_rimo = f"{prep_dir}/external_data/PLANCK_RIMO_TF_R4.00.tar.gz" + fname_rimo = f"{bdir}/PLANCK_RIMO_TF_R4.00.tar.gz" if not os.path.isfile(fname_rimo): print("Downloading Planck RIMO...") wget.download(url, fname_rimo) print("\n") tf = tarfile.open(fname_rimo) - tf.extractall(beam_dir) + tf.extractall(bdir) tf.close() spectra = ["TT", "EE", "BB", "TE"] @@ -93,52 +93,52 @@ def pre_processer(args): f"{spec}_2_ET", f"{spec}_2_BT", f"{spec}_2_BE"] - print("- frequency-only dependent beams") beams = {} for map_set in meta.map_sets_list: - if 'planck' in map_set: - print(map_set) - f = str(meta.freq_tag_from_map_set(map_set)).zfill(3) - freqs.append(f) - file_root = meta.file_root_from_map_set(map_set) - beam_fname = f"{beam_dir}/beam_{file_root}.dat" - beam_fname_T = f"{beam_dir}/beam_T_{file_root}.dat" - beam_fname_pol = f"{beam_dir}/beam_pol_{file_root}.dat" - - if os.path.isfile(beam_fname): - print("beams found in", beam_dir) - _, beams[map_set] = np.loadtxt(beam_fname, unpack=True) - continue - - wl = fits.open(f"{beam_windows_dir}/full_frequency/Wl_npipe6v20_{f}GHzx{f}GHz.fits") # noqa - wl_dict = {} - num = 1 - for spec in spectra: - for leak in leakage_term[spec]: - wl_dict[leak] = wl[num].data[leak] - num += 1 - - # Planck beam: sqrt of XX_2_XX term of the beam leakage matrix - bl_T = np.sqrt(wl_dict["TT_2_TT"][0])[:(3*nside_out+1)] - bl_pol = np.sqrt(wl_dict["EE_2_EE"][0])[:(3*nside_out+1)] - beams[map_set] = bl_pol - - # Save beams - np.savetxt(beam_fname_T, np.transpose([ells_beam, bl_T])) - np.savetxt(beam_fname_pol, np.transpose([ells_beam, bl_pol])) - np.savetxt(beam_fname, np.transpose([ells_beam, bl_pol])) - - print("- frequency-split dependent beams") - for split in splits: - for f in freqs: - print(f"planck_f{f}{split}") - beam_fname_T = f"{beam_dir}/beam_T_planck_{f}{split}.dat" - beam_fname_pol = f"{beam_dir}/beam_pol_planck_{f}{split}.dat" - - if os.path.isfile(beam_fname_pol): + if 'planck' not in map_set: + continue + print(map_set) + f = str(meta.freq_tag_from_map_set(map_set)).zfill(3) + beam_dir = meta.beam_dir_from_map_set(map_set) + beam_file = f"{beam_dir}/{meta.beam_file_from_map_set(map_set)}" + beam_file_T 
= f"{beam_dir}/beam_T_{map_set}.dat" + beam_file_pol = f"{beam_dir}/beam_pol_{map_set}.dat" + + if os.path.isfile(beam_file): + print("beams found in", beam_dir) + _, beams[map_set] = np.loadtxt(beam_file, unpack=True) + continue + + wl = fits.open(f"{beam_windows_dir}/full_frequency/Wl_npipe6v20_{f}GHzx{f}GHz.fits") # noqa + wl_dict = {} + num = 1 + for spec in spectra: + for leak in leakage_term[spec]: + wl_dict[leak] = wl[num].data[leak] + num += 1 + + # Planck beam: sqrt of XX_2_XX term of the beam leakage matrix + bl_T = np.sqrt(wl_dict["TT_2_TT"][0])[:(3*nside_out+1)] + bl_pol = np.sqrt(wl_dict["EE_2_EE"][0])[:(3*nside_out+1)] + beams[map_set] = bl_pol + + # Save beams + np.savetxt(beam_file_T, np.transpose([ells_beam, bl_T])) + np.savetxt(beam_file_pol, np.transpose([ells_beam, bl_pol])) + # DEBUG + print(f"Saving beam under {beam_file}") + np.savetxt(beam_file, np.transpose([ells_beam, bl_pol])) + + # Bundle-dependent beams + for bundle in bundles: + print(f"{map_set}_bundle{bundle}") + beam_file_T = f"{beam_dir}/beam_T_{map_set}_bundle{bundle}.dat" + beam_file_pol = f"{beam_dir}/beam_pol_{map_set}_bundle{bundle}.dat" # noqa + + if os.path.isfile(beam_file_pol): print("beams found in", beam_dir) continue - wl = fits.open(f"{beam_windows_dir}/AB/Wl_npipe6v20_{f}{split}x{f}{split}.fits") # noqa + wl = fits.open(f"{beam_windows_dir}/AB/Wl_npipe6v20_{f}{bundle}x{f}{bundle}.fits") # noqa wl_dict = {} num = 1 for spec in spectra: @@ -150,11 +150,11 @@ def pre_processer(args): bl_T = np.sqrt(wl_dict["TT_2_TT"][0])[:(3*nside_out+1)] bl_pol = np.sqrt(wl_dict["EE_2_EE"][0])[:(3*nside_out+1)] - np.savetxt(beam_fname_T, np.transpose([ells_beam, bl_T])) - np.savetxt(beam_fname_pol, np.transpose([ells_beam, bl_pol])) + np.savetxt(beam_file_T, np.transpose([ells_beam, bl_T])) + np.savetxt(beam_file_pol, np.transpose([ells_beam, bl_pol])) - if os.path.isdir(f"{beam_dir}/simulated_maps"): - shutil.rmtree(f"{beam_dir}/simulated_maps") + if os.path.isdir(f"{bdir}/simulated_maps"): + shutil.rmtree(f"{bdir}/simulated_maps") if not args.noise: print("---------------------") @@ -174,76 +174,81 @@ def pre_processer(args): dipole_template[2048] = dipole_amplitude * np.dot([x, y, z], hp.pix2vec(2048, np.arange(hp.nside2npix(2048)))) # noqa elif args.wmap: - to_muk = 1e3 + convert_to_K = 1.e-3 bands_dict = {'023': 'K1', '033': 'Ka1'} - wmap_dir = f"{prep_dir}/external_data/maps" for map_set in meta.map_sets_list: if 'wmap' in map_set: f = meta.freq_tag_from_map_set(map_set) - freqs.append(str(f).zfill(3)) - nsplits = meta.n_splits_from_map_set(map_set) - splits = list(range(1, nsplits+1)) + nbundles = meta.n_bundles_from_map_set(map_set) + bundles = list(range(1, nbundles+1)) if args.data: - meta.timer.start("Download WMAP data") - url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/skymaps/1yr/raw" # noqa - # Download WMAP single-year maps from URL and store - print("Downloading WMAP single-year maps") - for f in freqs: - for yr in splits: + for map_set in meta.map_sets_list: + if 'wmap' not in map_set: + continue + f = str(meta.freq_tag_from_map_set(map_set)).zfill(3) + + map_dir = meta.map_dir_from_map_set(map_set) + meta.timer.start("Download WMAP data") + url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/skymaps/1yr/raw" # noqa + + # Download WMAP single-year maps from URL and store + print("Downloading WMAP single-year maps") + for yr in bundles: fname_map = f"wmap_iqumap_r9_yr{yr}_{bands_dict[f]}_v5.fits" # noqa print("-", fname_map) - fname_data_map = f"{map_dir}/wmap_f{f}_split_{yr-1}.fits" + 
fname_data_map = f"{map_dir}/{map_set}_bundle{yr-1}_map.fits" # noqa if os.path.isfile(fname_data_map): print(" data maps already on disk") continue url = f"{url_pref}/{fname_map}" - fname_out = f"{wmap_dir}/{fname_map}" + fname_out = f"{map_dir}/{fname_map}" if os.path.isfile(fname_out): - print(" data already downloaded, at", wmap_dir) + print(" data already downloaded, at", map_dir) continue wget.download(url, fname_out) print("\n") - print("Mask") - # Download WMAP temperature analysis mask from URL and store - url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/ancillary/masks" # noqa - url = f"{url_pref}/wmap_temperature_kq85_analysis_mask_r9_9yr_v5.fits" # noqa - fname_mask = f"{mask_dir}/wmap_temperature_analysis_mask.fits" - if not os.path.isfile(fname_mask): - wget.download(url, fname_mask) - print("\n") - mask = hp.read_map(fname_mask, field=['N_OBS']) - - print("Beams") - print("Downloading WMAP beam window functions") - ext_dir = f"{prep_dir}/external_data" - url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/ancillary/beams" # noqa - - for f in freqs: - print(f"wmap_f{f}") - beam_fname = f"{beam_dir}/beam_wmap_f{f}.dat" - if os.path.isfile(beam_fname): - print(f"beams found at {beam_fname}") + print("Mask") + # Download WMAP temperature analysis mask from URL and store + url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/ancillary/masks" # noqa + url = f"{url_pref}/wmap_temperature_kq85_analysis_mask_r9_9yr_v5.fits" # noqa + fname_mask = f"{output_dir}/masks/wmap_temperature_analysis_mask.fits" # noqa + + os.makedirs(f"{output_dir}/masks", exist_ok=True) + if not os.path.isfile(fname_mask): + wget.download(url, fname_mask) + print("\n") + mask = mu.read_map(fname_mask, field=['N_OBS']) + + print("Beams") + print("Downloading WMAP beam window functions") + url_pref = "https://lambda.gsfc.nasa.gov/data/map/dr5/ancillary/beams" # noqa + + print(map_set) + beam_dir = meta.beam_dir_from_map_set(map_set) + beam_file = f"{beam_dir}/{meta.beam_file_from_map_set(map_set)}" # noqa + + if os.path.isfile(beam_file): + print(f"beams found at {beam_dir}") continue - beam_tf = f"{ext_dir}/wmap_beam_{bands_dict[f]}.txt" - if not os.path.isfile(beam_tf): + if not os.path.isfile(beam_file): url = f"{url_pref}/wmap_ampl_bl_{bands_dict[f]}_9yr_v5p1.txt" # noqa - wget.download(url, beam_tf) + wget.download(url, beam_file) print("\n") # read beams # if last ell < 3*nside_out # extend beams repeating the last value bl = np.zeros(3*nside_out+1) - l, b, _ = np.loadtxt(beam_tf, unpack=True) + l, b, _ = np.loadtxt(beam_file, unpack=True) lmax_file = int(l[-1]) if lmax_file < 3*nside_out: bl[:(lmax_file+1)] = b bl[lmax_file:] = b[-1] else: bl = b[:(3*nside_out+1)] - np.savetxt(beam_fname, np.transpose([ells_beam, bl])) + np.savetxt(beam_file, np.transpose([ells_beam, bl])) meta.timer.stop("Download WMAP data") if args.data: @@ -254,13 +259,21 @@ def pre_processer(args): print("Processing external maps") meta.timer.start("Process external maps") angles = hp.rotator.coordsys2euler_zyz(coord=["G", "C"]) - for nu in freqs: + + for map_set in meta.map_sets_list: if args.planck: + if "planck" not in map_set: + continue # original nside - nside_in = 1024 if nu == '030' else 2048 + f = str(meta.freq_tag_from_map_set(map_set)).zfill(3) + nside_in = 1024 if f == '030' else 2048 elif args.wmap: + if "wmap" not in map_set: + continue nside_in = 512 + map_dir = meta.map_dir_from_map_set(map_set) + dgrade = (nside_in != nside_out) if dgrade: # if downgrading # which alms indices to clip before downgrading @@ 
-270,19 +283,19 @@ def pre_processer(args): clip_indices.append(hp.Alm.getidx(lmax_in, np.arange(m, lmax_out+1), m)) # noqa clip_indices = np.concatenate(clip_indices) - for split in splits: + for bundle in bundles: print("---------------------") - print(f"- channel {nu} - split {split}") + print(f"- f{f} - bundle {bundle}") if args.planck: - fname_in = f"{npipe_dir}/npipe6v20{split}/npipe6v20{split}_{nu}_map.fits" # noqa - fname_alms = f"{alms_dir}/npipe6v20{split}/alms_npipe6v20{split}_{nu}_map_ns{nside_in}.npz" # noqa - fname_out = f"{map_dir}/planck_f{nu}_split_{splits_dict[split]}.fits" # noqa - os.makedirs(f"{alms_dir}/npipe6v20{split}", exist_ok=True) + fname_in = f"{npipe_dir}/npipe6v20{bundle}/npipe6v20{bundle}_{f}_map.fits" # noqa + fname_alms = f"{map_dir}/npipe6v20{bundle}/alms_npipe6v20{bundle}_{f}_map_ns{nside_in}.npz" # noqa + fname_out = f"{map_dir}/{map_set}_bundle{bundles_dict[bundle]}_map.fits" # noqa + os.makedirs(f"{map_dir}/npipe6v20{bundle}", exist_ok=True) elif args.wmap: - fname_in = f"{wmap_dir}/wmap_iqumap_r9_yr{split}_{bands_dict[nu]}_v5.fits" # noqa - fname_alms = f"{alms_dir}/wmap_{bands_dict[nu]}/alms_wmap_{bands_dict[nu]}_yr{split}_map_{nside_in}.npz" # noqa - fname_out = f"{map_dir}/wmap_f{nu}_split_{split-1}.fits" - os.makedirs(f"{alms_dir}/wmap_{bands_dict[nu]}", + fname_in = f"{map_dir}/wmap_{bands_dict[f]}/wmap_iqumap_r9_yr{bundle}_{bands_dict[f]}_v5.fits" # noqa + fname_alms = f"{map_dir}/wmap_{bands_dict[f]}/alms_wmap_{bands_dict[f]}_yr{bundle}_map_{nside_in}.npz" # noqa + fname_out = f"{map_dir}/{map_set}_bundle{bundle-1}_map.fits" # noqa + os.makedirs(f"{map_dir}/wmap_{bands_dict[f]}", exist_ok=True) if os.path.isfile(fname_out): @@ -296,8 +309,9 @@ def pre_processer(args): alm_file['alm_B']]) else: print("reading input maps at", fname_in) - # read TQU, convert to muK - map_in = to_muk * hp.read_map(fname_in, field=[0, 1, 2]) + # read TQU, convert to K + map_in = convert_to_K * mu.read_map(fname_in, + field=[0, 1, 2]) if args.planck: print("removing dipole") map_in[0] -= dipole_template[nside_in] @@ -329,19 +343,19 @@ def pre_processer(args): map_out *= binary_mask print("writing maps to disk") - hp.write_map(fname_out, map_out, - overwrite=True, dtype=np.float32) + mu.write_map(fname_out, map_out, dtype=np.float32) if args.plots: print("plotting maps") + if args.planck: utils.plot_map(map_out, - f"{map_plots_dir}/map_planck_f{nu}__{splits_dict[split]}", # noqa - title=f'planck_f{nu}', TQU=True) + f"{map_dir}/plots/map_{map_set}_bundle{bundles_dict[bundle]}", # noqa + title=map_set, TQU=True) elif args.wmap: utils.plot_map(map_out, - f"{map_plots_dir}/map_wmap_f{nu}__{split-1}", # noqa - title=f'wmap_f{nu}', TQU=True) + f"{map_dir}/plots/map_{map_set}_bundle{bundle-1}", # noqa + title=map_set, TQU=True) if 'm_out' in locals(): del map_out if 'alm' in locals(): @@ -352,7 +366,7 @@ def pre_processer(args): print("---------------------") print("Processing simulations") meta.timer.start("Process simulations") - nsims = meta.num_sims + nsims = meta.covariance["cov_num_sims"] # path to already downgraded and rotated npipe sims ext_sims_dir = "/global/cfs/cdirs/sobs/users/cranucci" ext_sims_dir += "/npipe" if args.planck else "/wmap" @@ -369,16 +383,23 @@ def pre_processer(args): for sim_id in range(nsims): # starting seed from 0200 to 0000 (if processing original maps) sim_id_in = sim_id+200 if process_sims else sim_id - if args.noise: - # noise output dirs - noise_dir = f"{sims_dir}/{sim_id:04d}/noise" - os.makedirs(noise_dir, exist_ok=True) - else: - # 
sims output dirs - os.makedirs(f"{sims_dir}/{sim_id:04d}", exist_ok=True) - - for nu in freqs: - nside_in = 1024 if nu == '030' else 2048 # original nside + + # sims output dirs + os.makedirs(f"{sims_dir}/{sim_id:04d}", exist_ok=True) + + for map_set in meta.map_sets_list: + if "planck" not in map_set and "wmap" not in map_set: + continue + + if args.noise: + # noise output dirs + noise_dir = meta.covariance["noise_map_sims_dir"][map_set] + noise_dir += f"/{sim_id:04d}" + os.makedirs(noise_dir, exist_ok=True) + + f = str(meta.freq_tag_from_map_set(map_set)).zfill(3) + # original nside + nside_in = 1024 if f == '030' else 2048 dgrade = (nside_in != nside_out) if process_sims and dgrade: @@ -389,33 +410,34 @@ def pre_processer(args): clip_indices.append(hp.Alm.getidx(lmax_in, np.arange(m, lmax_out+1), m)) # noqa clip_indices = np.concatenate(clip_indices) - for split in splits: + for bundle in bundles: print("---------------------") - print(f"sim {sim_id:04d} - channel {nu} - split {split}") + print(f"sim {sim_id:04d} - channel {f} - bundle {bundle}") if args.noise: # noise fnames (Planck and WMAP) if args.planck: - fname_in = f"{ext_sims_dir}/npipe6v20{split}_sim" + fname_in = f"{ext_sims_dir}/npipe6v20{bundle}_sim" fname_in += f"/{sim_id_in:04d}/residual" - fname_in += f"/residual_npipe6v20{split}_{nu}_{sim_id_in:04d}.fits" # noqa - fname_out = f"{noise_dir}/planck_f{nu}_split_{splits_dict[split]}.fits" # noqa + fname_in += f"/residual_npipe6v20{bundle}_{f}_{sim_id_in:04d}.fits" # noqa + fname_out = f"{noise_dir}/{map_set}_bundle{bundles_dict[bundle]}_noise.fits" # noqa elif args.wmap: fname_in = f"{ext_sims_dir}/noise/{sim_id_in:04d}" - fname_in += f"/noise_maps_mK_band{bands_dict[nu]}_yr{split}.fits" # noqa - fname_out = f"{noise_dir}/wmap_f{nu}_split_{split-1}.fits" # noqa + fname_in += f"/noise_maps_mK_band{bands_dict[f]}_yr{bundle}.fits" # noqa + fname_out = f"{noise_dir}/{map_set}_bundle{bundle-1}_noise.fits" # noqa else: # sims fnames (only Planck) - fname_in = f"{ext_sims_dir}/npipe6v20{split}_sim" - fname_in += f"/{sim_id_in:04d}/npipe6v20{split}_{nu}_map.fits" # noqa - fname_out = f"{sims_dir}/{sim_id:04d}/planck_f{nu}_split_{splits_dict[split]}.fits" # noqa + fname_in = f"{ext_sims_dir}/npipe6v20{bundle}_sim" + fname_in += f"/{sim_id_in:04d}/npipe6v20{bundle}_{f}_map.fits" # noqa + fname_out = f"{sims_dir}/{sim_id:04d}/{map_set}_bundle{bundles_dict[bundle]}_map.fits" # noqa if os.path.isfile(fname_out): print("sims already processed, found in", sims_dir) continue print("reading input maps at", fname_in) - # read TQU, convert to muK - map_in = to_muk * hp.read_map(fname_in, field=[0, 1, 2]) + # read TQU, convert to K + map_in = convert_to_K * mu.read_map(fname_in, + field=[0, 1, 2]) if process_sims: if not args.noise: @@ -440,8 +462,8 @@ def pre_processer(args): print("masking") map_out *= binary_mask print("writing maps to disk") - hp.write_map(fname_out, map_out, - overwrite=True, dtype=np.float32) + mu.write_map(fname_out, map_out, dtype=np.float32) + meta.timer.stop("Process simulations") print("---------------------") diff --git a/pipeline/run_planck_mpi4py.sh b/pipeline/run_planck_mpi4py.sh deleted file mode 100644 index 223bb48..0000000 --- a/pipeline/run_planck_mpi4py.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env bash - -paramfile='../paramfiles/paramfile_SAT_bbpower.yaml' - -echo "Running pipeline with paramfile: ${paramfile}" - -#OpenMP settings: -export OMP_NUM_THREADS=1 -export OMP_PLACES=threads -export OMP_PROC_BIND=spread - -# Run serially -echo "Pre-processing 
real data" -echo "-----------------------------" -python pre_processer.py --globals ${paramfile} --verbose -python mask_handler.py --globals ${paramfile} --plots --verbose -python pre_processer_ext.py --globals ${paramfile} --planck --data --plots -python pre_processer_ext.py --globals ${paramfile} --planck --noise -python pre_processer_ext.py --globals ${paramfile} --planck --sims - -echo "Running filterer for data" -echo "-------------------------" -python filterer.py --globals ${paramfile} --data - -echo "Running mcm..." -echo "--------------" -python mcmer.py --globals ${paramfile} --plot - - - -# run in parallel with salloc -N 1 -C cpu -q interactive -t 00:30:00 - -echo "Generating transfer sims" # 1m20 for 30 sims -echo "-----------------------------" -srun -n 30 -c 8 python pre_processer.py --globals ${paramfile} --verbose --sims - -echo "Running filterer for transfer" # 1m20 for 30 sims -echo "-----------------------------" -srun -n 30 -c 8 --cpu_bind=cores python filterer.py --globals ${paramfile} --transfer - -echo "Running filterer for sims" # 2m50 for 100 sims / 10 splits -echo "-------------------------" -srun -n 25 -c 10 --cpu_bind=cores python filterer.py --globals ${paramfile} --sims - -echo "Running cl estimation for tf estimation" # 1m50 for 30 sims -echo "---------------------------------------" -srun -n 30 -c 8 --cpu_bind=cores python pcler.py --globals ${paramfile} --tf_est --verbose - -echo "Running transfer estimation" -echo "---------------------------" -python transfer.py --globals ${paramfile} - -echo "Running cl estimation for validation" -echo "------------------------------------" -srun -n 30 -c 8 --cpu_bind=cores python pcler.py --globals ${paramfile} --tf_val --verbose - -echo "Transfer validation" -echo "---------------------" -python transfer_validator.py --globals ${paramfile} - - - -# # run in parallel with salloc -N 1 -C cpu -q interactive -t 01:00:00 -echo "Running pcler on data" -echo "---------------------" -python pcler.py --globals ${paramfile} --data - -echo "Running pcler on sims" # 5m for 100 sims / 21 splits -echo "---------------------" -srun -n 25 -c 10 --cpu_bind=cores python pcler.py --globals ${paramfile} --sims --verbose - -echo "Running coadder on data" -echo "---------------------" -python coadder.py --globals ${paramfile} --data - -echo "Running coadder on sims" # 0m37s for 100 sims / 21 splits -echo "---------------------" -srun -n 25 -c 10 --cpu_bind=cores python coadder.py --globals ${paramfile} --sims - -echo "Running covariance estimation" -echo "-----------------------------" -python covfefe.py --globals ${paramfile} - -echo "Create sacc files for sims and data" -echo "-----------------------------------" -python saccer.py --globals ${paramfile} --data -# 2m for 100 sims / 21 splits -srun -n 25 -c 10 --cpu_bind=cores python saccer.py --globals ${paramfile} --sims diff --git a/pipeline/run_refactored.sh b/pipeline/run_refactored.sh index 25ac071..ad8a019 100644 --- a/pipeline/run_refactored.sh +++ b/pipeline/run_refactored.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -paramfile='../paramfiles/paramfile_refactored_noisy.yaml' +paramfile='../paramfiles/paramfile_refactored.yaml' echo "Running pipeline with paramfile: ${paramfile}" diff --git a/pipeline/sacc_plotter.py b/pipeline/sacc_plotter.py index dc1cb3c..6ea6a86 100644 --- a/pipeline/sacc_plotter.py +++ b/pipeline/sacc_plotter.py @@ -4,7 +4,9 @@ from itertools import product import sacc import numpy as np +import healpy as hp import pymaster as nmt +import os def 
load_bpwins(coupling_file): @@ -43,8 +45,8 @@ def multipole_min_from_tf(tf_file, field_pairs, snr_cut=3.): def plot_spectrum(lb, cb, cb_err, title, ylabel, xlim, - add_theory=False, lth=None, clth=None, - cbth=None, save_file=None): + cb_data=None, cb_data_err=None, add_theory=False, + lth=None, clth=None, cbth=None, save_file=None): """ """ plt.figure(figsize=(8, 6)) @@ -62,17 +64,28 @@ def plot_spectrum(lb, cb, cb_err, title, ylabel, xlim, main.set_title(title) fac = lb * (lb + 1) / (2 * np.pi) + offset = 0 if cb_data is None else 1 if add_theory: fac_th = lth * (lth + 1) / (2 * np.pi) - main.plot(lth, fac_th * clth, c="darkgray", ls="-.", lw=2.6) + main.plot(lth, fac_th * clth, c="darkgray", ls="-.", lw=2.6, + label="theory") main.errorbar( - lb, fac * cb, yerr=fac * cb_err, marker="o", ls="None", - markerfacecolor="white", markeredgecolor="navy", + lb-offset, fac * cb, yerr=fac * cb_err, marker="o", ls="None", + markerfacecolor="white", markeredgecolor="navy", label="sims", elinewidth=1.75, ecolor="navy", markeredgewidth=1.75 ) + if cb_data is not None: + main.errorbar( + lb+offset, fac * cb_data, yerr=fac * cb_data_err, marker="o", + ls="None", markerfacecolor="white", markeredgecolor="darkorange", + elinewidth=1.75, ecolor="darkorange", markeredgewidth=1.75, + label="data" + ) + + main.legend(fontsize=13) main.set_xlim(*xlim) if add_theory: @@ -80,9 +93,18 @@ def plot_spectrum(lb, cb, cb_err, title, ylabel, xlim, sub.axhspan(-2, 2, color="gray", alpha=0.4) sub.axhspan(-1, 1, color="gray", alpha=0.8) - sub.plot(lb, (cb - cbth) / cb_err, marker="o", ls="None", - markerfacecolor="white", markeredgecolor="navy", - markeredgewidth=1.75) + sub.plot( + lb-offset, (cb - cbth) / cb_err, marker="o", ls="None", + markerfacecolor="white", markeredgecolor="navy", + markeredgewidth=1.75 + ) + if cb_data is not None: + sub.plot( + lb+offset, (cb_data - cbth) / cb_data_err, + marker="o", ls="None", + markerfacecolor="white", markeredgecolor="darkorange", + markeredgewidth=1.75 + ) sub.set_xlim(*xlim) sub.set_ylim(-4.5, 4.5) @@ -92,6 +114,7 @@ def plot_spectrum(lb, cb, cb_err, title, ylabel, xlim, else: plt.tight_layout() plt.show() + plt.close() def main(args): @@ -114,24 +137,14 @@ def main(args): nmt_binning = nmt.NmtBin.from_edges(binning["bin_low"], binning["bin_high"] + 1) lb = nmt_binning.get_effective_ells() + lmax = 3*meta.nside - 1 field_pairs = [m1+m2 for m1, m2 in product("TEB", repeat=2)] ps_names = meta.get_ps_names_list(type="all", coadd=True) types = {"T": "0", "E": "e", "B": "b"} - if args.data: - Nsims = 1 - elif args.sims: - Nsims = meta.covariance["cov_num_sims"] - - psth = np.load(meta.covariance["fiducial_cmb"]) - ps_th = {} - for field_pair in field_pairs: - if field_pair in psth: - ps_th[field_pair] = psth[field_pair] - else: - ps_th[field_pair] = psth[field_pair[::-1]] + Nsims = meta.covariance["cov_num_sims"] # Transfer function idx_bad_tf = {} @@ -150,12 +163,7 @@ def main(args): bpw_file = f"couplings_{ms1}_{ms2}.npz" bpw_mats[ms1, ms2] = load_bpwins(f"{coupling_dir}/{bpw_file}") - clth_binned = { - (ms1, ms2): binned_theory_from_unbinned(ps_th, bpw_mat) - for (ms1, ms2), bpw_mat in bpw_mats.items() - } - - plot_data = { + plot_sims = { (ms1, ms2, fp): { "x": None, "y": [], @@ -169,29 +177,100 @@ def main(args): for fp in field_pairs } - if args.sims: - # Load theory + fiducial_cmb = meta.covariance["fiducial_cmb"] + fiducial_dust = meta.covariance["fiducial_dust"] + fiducial_synch = meta.covariance["fiducial_synch"] + + for ms1, ms2 in ps_names: + nu1 = 
meta.freq_tag_from_map_set(ms1) + nu2 = meta.freq_tag_from_map_set(ms2) + + clth = {} + + for i, fp in enumerate(["TT", "EE", "BB", "TE"]): + cmb_cl = hp.read_cl(fiducial_cmb)[i, :lmax+1] + dust_cl = hp.read_cl( + fiducial_dust.format(nu1=nu1, nu2=nu2) + )[i, :lmax+1] + synch_cl = hp.read_cl( + fiducial_synch.format(nu1=nu1, nu2=nu2) + )[i, :lmax+1] + clth[fp] = (cmb_cl + dust_cl + synch_cl) + clth["EB"] = np.zeros(lmax+1) + clth["TB"] = np.zeros(lmax+1) + clth["l"] = np.arange(lmax+1) + + cl_th = {} + for fp in field_pairs: + if fp in clth: + cl_th[fp] = clth[fp] + else: + # reverse the order of the two fields + cl_th[fp] = clth[fp[::-1]] + + clth_binned = binned_theory_from_unbinned(cl_th, bpw_mats[(ms1, ms2)]) + + ftag1 = meta.filtering_tag_from_map_set(ms1) + ftag2 = meta.filtering_tag_from_map_set(ms2) + + for fp in field_pairs: + mask_th = (clth["l"] <= 2 * meta.nside - 1) + x_th, y_th = clth["l"][mask_th], cl_th[fp][mask_th] + + idx_bad = idx_bad_tf[ftag1, ftag2][fp] + mask = ((np.arange(len(lb)) > idx_bad) & + (lb <= 2 * meta.nside - 1)) + th_binned = clth_binned[fp][mask] + + plot_sims[ms1, ms2, fp]["x_th"] = x_th + plot_sims[ms1, ms2, fp]["y_th"] = y_th + plot_sims[ms1, ms2, fp]["th_binned"] = th_binned + + # Load data + plot_data = { + (ms1, ms2, fp): { + "y": None, + } for ms1, ms2 in ps_names + for fp in field_pairs + } + fname_data = f"{sacc_dir}/cl_and_cov_sacc.fits" + + if not os.path.isfile(fname_data): + print("No data sacc to print. Skipping...") + else: + s = sacc.Sacc.load_fits(fname_data) + for ms1, ms2 in ps_names: ftag1 = meta.filtering_tag_from_map_set(ms1) ftag2 = meta.filtering_tag_from_map_set(ms2) for fp in field_pairs: - mask_th = (psth["l"] <= 2 * meta.nside - 1) - x_th, y_th = psth["l"][mask_th], ps_th[fp][mask_th] + f1, f2 = fp + + ell, cl, cov = s.get_ell_cl( + f"cl_{types[f1]}{types[f2]}", + ms1, + ms2, + return_cov=True) idx_bad = idx_bad_tf[ftag1, ftag2][fp] - mask = ((np.arange(len(lb)) > idx_bad) & - (lb <= 2 * meta.nside - 1)) - th_binned = clth_binned[ms1, ms2][fp][mask] + mask = ( + (np.arange(len(ell)) > idx_bad) & + (ell <= 2 * meta.nside - 1)) + x, y, err = ell, cl, np.sqrt(cov.diagonal()) + x, y, err = x[mask], y[mask], err[mask] - plot_data[ms1, ms2, fp]["x_th"] = x_th - plot_data[ms1, ms2, fp]["y_th"] = y_th - plot_data[ms1, ms2, fp]["th_binned"] = th_binned + plot_data[ms1, ms2, fp]["x"] = x + plot_data[ms1, ms2, fp]["y"] = y + plot_data[ms1, ms2, fp]["err"] = err + # Load simulations for id_sim in range(Nsims): - sim_label = f"_{id_sim:04d}" if args.sims else "" - - s = sacc.Sacc.load_fits(f"{sacc_dir}/cl_and_cov_sacc{sim_label}.fits") + if verbose: + print(f"# {id_sim} | {ms1} x {ms2}") + s = sacc.Sacc.load_fits( + f"{sacc_dir}/cl_and_cov_sacc_{id_sim:04d}.fits" + ) for ms1, ms2 in ps_names: ftag1 = meta.filtering_tag_from_map_set(ms1) @@ -213,37 +292,34 @@ def main(args): x, y, err = ell, cl, np.sqrt(cov.diagonal()) x, y, err = x[mask], y[mask], err[mask] - plot_data[ms1, ms2, fp]["x"] = x - plot_data[ms1, ms2, fp]["y"].append(y) - plot_data[ms1, ms2, fp]["err"] = err - plot_data[ms1, ms2, fp]["title"] = f"{ms1} x {ms2} - {fp}" - plot_data[ms1, ms2, fp]["ylabel"] = fp + plot_sims[ms1, ms2, fp]["x"] = x + plot_sims[ms1, ms2, fp]["y"].append(y) + plot_sims[ms1, ms2, fp]["err"] = err + plot_sims[ms1, ms2, fp]["title"] = f"{ms1} x {ms2} - {fp}" + plot_sims[ms1, ms2, fp]["ylabel"] = fp for ms1, ms2 in ps_names: - if verbose: - print(f"# {id_sim} | {ms1} x {ms2}") for fp in field_pairs: - plot_data[ms1, ms2, fp]["y"] = np.mean( - plot_data[ms1, 
ms2, fp]["y"], axis=0 + plot_sims[ms1, ms2, fp]["y"] = np.mean( + plot_sims[ms1, ms2, fp]["y"], axis=0 ) - plot_data[ms1, ms2, fp]["err"] /= np.sqrt(Nsims) - if args.sims: - plot_name = f"plot_cells_sims_{ms1}_{ms2}_{fp}.pdf" - if args.data: - plot_name = f"plot_cells_data_{ms1}_{ms2}_{fp}.pdf" + plot_sims[ms1, ms2, fp]["err"] /= np.sqrt(Nsims) + plot_name = f"plot_cells_{ms1}_{ms2}_{fp}.pdf" plot_spectrum( - plot_data[ms1, ms2, fp]["x"], - plot_data[ms1, ms2, fp]["y"], - plot_data[ms1, ms2, fp]["err"], - title=plot_data[ms1, ms2, fp]["title"], - ylabel=plot_data[ms1, ms2, fp]["ylabel"], + plot_sims[ms1, ms2, fp]["x"], + plot_sims[ms1, ms2, fp]["y"], + plot_sims[ms1, ms2, fp]["err"], + cb_data=plot_data[ms1, ms2, fp]["y"], + cb_data_err=plot_data[ms1, ms2, fp]["err"], + title=plot_sims[ms1, ms2, fp]["title"], + ylabel=plot_sims[ms1, ms2, fp]["ylabel"], xlim=(30, 2 * meta.nside - 1), - add_theory=args.sims, - lth=plot_data[ms1, ms2, fp]["x_th"], - clth=plot_data[ms1, ms2, fp]["y_th"], - cbth=plot_data[ms1, ms2, fp]["th_binned"], + add_theory=True, + lth=plot_sims[ms1, ms2, fp]["x_th"], + clth=plot_sims[ms1, ms2, fp]["y_th"], + cbth=plot_sims[ms1, ms2, fp]["th_binned"], save_file=f"{plot_dir}/{plot_name}" ) @@ -253,9 +329,5 @@ def main(args): parser.add_argument("--globals", type=str, help="Path to the global configuration file") parser.add_argument("--verbose", action="store_true", help="Verbose mode.") - - mode = parser.add_mutually_exclusive_group() - mode.add_argument("--sims", action="store_true") - mode.add_argument("--data", action="store_true") args = parser.parse_args() main(args) diff --git a/pipeline/simulations/coadd_simulated_maps.py b/pipeline/simulations/coadd_simulated_maps.py new file mode 100644 index 0000000..26e1464 --- /dev/null +++ b/pipeline/simulations/coadd_simulated_maps.py @@ -0,0 +1,134 @@ +import argparse +from soopercool import BBmeta +from soopercool import mpi_utils as mpi +import healpy as hp +import numpy as np +import matplotlib.pyplot as plt + + +def main(args): + """ + """ + meta = BBmeta(args.globals) + verbose = args.verbose + + do_plots = not args.no_plots + + out_dir = meta.output_directory + masks_dir = f"{out_dir}/masks" + + sims_dir = f"{out_dir}/cov_sims" + BBmeta.make_dir(sims_dir) + + signal_alm_dir = meta.covariance["signal_alm_sims_dir"] + signal_alm_template = meta.covariance["signal_alm_sims_template"] + + binary = hp.read_map(f"{masks_dir}/binary_mask.fits") + + lmax = 3*meta.nside - 1 + ll = np.arange(lmax + 1) + cl2dl = ll*(ll+1)/2./np.pi + + beams = { + ms: meta.read_beam(ms)[1][:lmax+1] + for ms in meta.map_sets_list + } + + if do_plots: + field_pairs = ["TT", "EE", "BB", "TE"] + fiducial_cmb = meta.covariance["fiducial_cmb"] + fiducial_dust = meta.covariance["fiducial_dust"] + fiducial_synch = meta.covariance["fiducial_synch"] + clth = {} + + for ms in meta.map_sets_list: + nu = meta.freq_tag_from_map_set(ms) + for i, fp in enumerate(field_pairs): + cmb_cl = hp.read_cl(fiducial_cmb)[i, :lmax+1] + dust_cl = hp.read_cl( + fiducial_dust.format(nu1=nu, nu2=nu) + )[i, :lmax+1] + synch_cl = hp.read_cl( + fiducial_synch.format(nu1=nu, nu2=nu) + )[i, :lmax+1] + clth[ms, fp] = (cmb_cl + dust_cl + synch_cl) * beams[ms]**2 + + mpi.init(True) + + for id_sim in mpi.taskrange(meta.covariance["cov_num_sims"] - 1): + + base_dir = f"{sims_dir}/{id_sim:04d}" + BBmeta.make_dir(base_dir) + if do_plots: + plots_dir = f"{out_dir}/plots/cov_sims/{id_sim:04d}" + BBmeta.make_dir(plots_dir) + + for ms in meta.map_sets_list: + if verbose: + print(f"# {id_sim} | 
{ms}") + + noise_map_dir = meta.covariance["noise_map_sims_dir"][ms] + noise_map_template = meta.covariance["noise_map_sims_template"][ms] + + ft = meta.freq_tag_from_map_set(ms) + fname = signal_alm_template.format(id_sim=id_sim, freq_tag=ft) + + alms = hp.read_alm(f"{signal_alm_dir}/{fname}", hdu=(1, 2, 3)) + alms_beamed = [hp.almxfl(alm, beams[ms]) for alm in alms] + signal = hp.alm2map(alms_beamed, nside=meta.nside) + + if do_plots: + signal_cl = {fp: hp.anafast(signal)[i] + for i, fp in enumerate(field_pairs)} + + for id_bundle in range(meta.n_bundles_from_map_set(ms)): + + fname = noise_map_template.format(id_sim=id_sim, map_set=ms, + id_bundle=id_bundle) + noise = hp.read_map( + f"{noise_map_dir}/{fname}", field=[0, 1, 2] + ) + if do_plots and id_bundle == 0: + noise_cl = { + fp: hp.anafast(noise)[i] + for i, fp in enumerate(field_pairs) + } + for i, fp in enumerate(field_pairs): + plt.title(f"{ms} | bundle {id_bundle} | {fp}") + plt.loglog(ll, cl2dl*signal_cl[fp], c="navy", + label="Signal") + plt.loglog(ll, cl2dl*noise_cl[fp], c="grey", alpha=0.5, + label="Noise") + plt.loglog(ll, cl2dl*clth[ms, fp], c="darkorange", + label="Theory (beamed)") + plt.xlabel(r"$\ell$", fontsize=15) + plt.ylabel(r"$D_\ell$", fontsize=15) + plt.legend() + plt.savefig(f"{plots_dir}/cl_{ms}_bundle{id_bundle}_{fp}.png") # noqa + plt.close() + + split_map = signal + noise + + map_name = f"cov_sims_{ms}_bundle{id_bundle}.fits" + hp.write_map(f"{base_dir}/{map_name}", split_map*binary, + overwrite=True, + dtype=np.float32) + + if do_plots: + for i, f in enumerate("TQU"): + hp.mollview(split_map[i]*binary, + cmap="RdYlBu_r", + title=f"{ms} - {id_sim} - {f}", + min=-300 if f == "T" else -10, + max=300 if f == "T" else 10) + plt.savefig(f"{plots_dir}/map_{ms}_bundle{id_bundle}_{f}.png") # noqa + plt.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--globals", help="Path to the global parameter file.") + parser.add_argument("--no_plots", action="store_true", help="Do not plot the maps.") # noqa + parser.add_argument("--verbose", action="store_true", help="Verbose mode.") + args = parser.parse_args() + main(args) diff --git a/pipeline/simulations/generate_mock_cmb_sky.py b/pipeline/simulations/generate_mock_cmb_sky.py index 3ee0bf4..512e070 100644 --- a/pipeline/simulations/generate_mock_cmb_sky.py +++ b/pipeline/simulations/generate_mock_cmb_sky.py @@ -1,16 +1,17 @@ import argparse from soopercool import BBmeta -import healpy as hp from soopercool import utils -import matplotlib.pyplot as plt +from soopercool import map_utils as mu +import soopercool.utils as su import numpy as np +from soopercool import sim_utils def main(args): """ """ meta = BBmeta(args.globals) - verbose = args.verbose + # verbose = args.verbose out_dir = meta.output_directory sims_dir = f"{out_dir}/cmb_sims" @@ -18,7 +19,9 @@ def main(args): BBmeta.make_dir(sims_dir) BBmeta.make_dir(plot_dir) - lmax_sim = 3*meta.nside - 1 + mask = mu.read_map(meta.masks["analysis_mask"], pix_type=meta.pix_type) + lmax = mu.lmax_from_map(mask, pix_type=meta.pix_type) + lmax_sim = lmax + 500 # Create the CMB fiducial cl lth, clth = utils.get_theory_cls( @@ -29,55 +32,98 @@ def main(args): l=lth, **clth) beams = { - ms: meta.read_beam(ms)[1] + ms: su.read_beam_from_file( + f"{meta.beam_dir_from_map_set(ms)}/" + f"{meta.beam_file_from_map_set(ms)}", + lmax=lmax_sim + )[1] for ms in meta.map_sets_list } - hp_ordering = ["TT", "TE", "TB", "EE", "EB", "BB"] - + template = mu.template_from_map(mask, ncomp=3, 
pix_type=meta.pix_type) for id_sim in range(meta.covariance["cov_num_sims"]): - alms = hp.synalm([clth[fp] for fp in hp_ordering]) + + meta.timer.start("cmb_sim") + + alms = sim_utils.get_alms_from_cls( + ps_dict=clth, + lmax=lmax_sim, + fields="TEB", + components=None + ) for ms in meta.map_sets_list: - if verbose: - print(f"# {id_sim+1} | {ms}") - - alms_beamed = [hp.almxfl(alm, beams[ms]) for alm in alms] - - map = hp.alm2map(alms_beamed, nside=meta.nside) - hp.write_map(f"{sims_dir}/cmb_{ms}_{id_sim:04d}.fits", map, - overwrite=True, dtype=np.float32) - hp.write_cl(f"{sims_dir}/cl_{ms}_{id_sim:04d}.fits", - hp.anafast(map), overwrite=True, dtype=np.float32) - - # Plotting - cls = {ms: [] for ms in meta.map_sets_list} - for ms in meta.map_sets_list: - for id_sim in range(meta.covariance["cov_num_sims"]): - cls[ms].append( - hp.read_cl(f"{sims_dir}/cl_{ms}_{id_sim:04d}.fits") + + alms_beamed = sim_utils.beam_alms( + alms, + beams[ms] ) - cl_mean = {ms: np.mean(np.array(cls[ms]), axis=0) - for ms in meta.map_sets_list} - cl_std = {ms: np.std(np.array(cls[ms]), axis=0) - for ms in meta.map_sets_list} - - ll = np.arange(lmax_sim + 1) - cl2dl = ll*(ll + 1)/2./np.pi - for ms in meta.map_sets_list: - for ip, fp in enumerate(["TT", "EE", "BB", "TE"]): - plt.title(f"{ms} | {fp}") - plt.errorbar(ll, cl2dl*cl_mean[ms][ip], cl2dl*cl_std[ms][ip], - label="data", color="navy", lw=0.5, zorder=-32) - plt.plot(ll, cl2dl*clth[fp], label=" theory", - color="k", ls="--") - plt.plot(ll, cl2dl*clth[fp]*beams[ms]**2, label=" beamed theory", - color="darkorange", ls="--") - plt.ylabel(r"$D_\ell$") - plt.xlabel(r"$\ell$") - plt.yscale("log") - plt.legend() - plt.savefig(f"{plot_dir}/cl_{ms}_{fp}.png") - plt.clf() + + map = sim_utils.get_map_from_alms( + alms_beamed, + template=template + ) + + mu.write_map( + f"{sims_dir}/cmb_{ms}_{id_sim:04d}.fits", + map, + pix_type=meta.pix_type) + + meta.timer.stop( + "cmb_sim", + text_to_output=f"Simulated CMB maps for sim {id_sim:04d}" + ) + + # Plot functions, not sure if we want to add it + # from pixell import enplot + # for i, f in enumerate("TQU"): + # plot = enplot.plot(maps["sat_f093"][i], color="planck", ticks=10) + # enplot.write(f"{plot_dir}/cmb_sat_f093_{f}", plot) + + # hp_ordering = ["TT", "TE", "TB", "EE", "EB", "BB"] + + # for id_sim in range(meta.covariance["cov_num_sims"]): + # alms = hp.synalm([clth[fp] for fp in hp_ordering]) + # for ms in meta.map_sets_list: + # if verbose: + # print(f"# {id_sim+1} | {ms}") + + # alms_beamed = [hp.almxfl(alm, beams[ms]) for alm in alms] + + # map = hp.alm2map(alms_beamed, nside=meta.nside) + # mu.write_map(f"{sims_dir}/cmb_{ms}_{id_sim:04d}.fits", map, + # dtype=np.float32) + # hp.write_cl(f"{sims_dir}/cl_{ms}_{id_sim:04d}.fits", + # hp.anafast(map), overwrite=True, dtype=np.float32) + + # # Plotting + # cls = {ms: [] for ms in meta.map_sets_list} + # for ms in meta.map_sets_list: + # for id_sim in range(meta.covariance["cov_num_sims"]): + # cls[ms].append( + # hp.read_cl(f"{sims_dir}/cl_{ms}_{id_sim:04d}.fits") + # ) + # cl_mean = {ms: np.mean(np.array(cls[ms]), axis=0) + # for ms in meta.map_sets_list} + # cl_std = {ms: np.std(np.array(cls[ms]), axis=0) + # for ms in meta.map_sets_list} + + # ll = np.arange(lmax_sim + 1) + # cl2dl = ll*(ll + 1)/2./np.pi + # for ms in meta.map_sets_list: + # for ip, fp in enumerate(["TT", "EE", "BB", "TE"]): + # plt.title(f"{ms} | {fp}") + # plt.errorbar(ll, cl2dl*cl_mean[ms][ip], cl2dl*cl_std[ms][ip], + # label="data", color="navy", lw=0.5, zorder=-32) + # plt.plot(ll, 
cl2dl*clth[fp], label=" theory", + # color="k", ls="--") + # plt.plot(ll, cl2dl*clth[fp]*beams[ms]**2, label=" beamed theory", + # color="darkorange", ls="--") + # plt.ylabel(r"$D_\ell$") + # plt.xlabel(r"$\ell$") + # plt.yscale("log") + # plt.legend() + # plt.savefig(f"{plot_dir}/cl_{ms}_{fp}.png") + # plt.clf() if __name__ == "__main__": diff --git a/pipeline/simulations/generate_noise_from_data.py b/pipeline/simulations/generate_noise_from_data.py index fb10223..359491c 100644 --- a/pipeline/simulations/generate_noise_from_data.py +++ b/pipeline/simulations/generate_noise_from_data.py @@ -1,5 +1,6 @@ import argparse from soopercool import BBmeta +from soopercool import map_utils as mu from soopercool import mpi_utils as mpi import numpy as np import healpy as hp @@ -94,9 +95,8 @@ def main(args): noise_maps = hp.alm2map(noise_alms, nside=meta.nside) fname = f"homogeneous_noise_{ms}_bundle{id_bundle}_{id_sim:04d}.fits" # noqa - hp.write_map(f"{noise_sims_dir}/{fname}", noise_maps, - overwrite=True, - dtype=np.float32) + mu.write_map(f"{noise_sims_dir}/{fname}", noise_maps, + dtype=np.float32, convert_muK_to_K=True) for ip, fp in enumerate(["TT", "EE", "BB", "TE"]): plt.title(f"{ms} | {fp}") plt.plot(hp.anafast(noise_maps)[ip], c="navy", diff --git a/pipeline/simulations/generate_sat_noise.py b/pipeline/simulations/generate_sat_noise.py index 10e2bed..c989bac 100644 --- a/pipeline/simulations/generate_sat_noise.py +++ b/pipeline/simulations/generate_sat_noise.py @@ -3,6 +3,7 @@ import healpy as hp import soopercool.SO_Noise_Calculator_Public_v3_1_2 as noise_calc from soopercool import mpi_utils as mpi +from soopercool import map_utils as mu import numpy as np @@ -95,9 +96,9 @@ def main(args): nlth[ms][1]["TT"], nlth[ms][1]["EE"], n_bundles, meta.nside, seed ) - hp.write_map( + mu.write_map( f"{sims_dir}/{id_sim:04d}/noise_sims_{ms}_{id_sim:04d}_bundle{id_bundle}.fits", # noqa - noise, overwrite=True, dtype=np.float32 + noise, dtype=np.float32, convert_muK_to_K=True ) diff --git a/pipeline/simulations/generate_tf_estimation_sims.py b/pipeline/simulations/generate_tf_estimation_sims.py index a3e9ff3..c941877 100644 --- a/pipeline/simulations/generate_tf_estimation_sims.py +++ b/pipeline/simulations/generate_tf_estimation_sims.py @@ -1,9 +1,10 @@ import argparse from soopercool import BBmeta, utils from soopercool import mpi_utils as mpi +from soopercool import map_utils as mu +from soopercool import sim_utils + import numpy as np -import healpy as hp -import matplotlib.pyplot as plt def main(args): @@ -21,7 +22,9 @@ def main(args): if do_plots: BBmeta.make_dir(plot_dir) - lmax_sim = 3 * meta.nside - 1 + mask = mu.read_map(meta.masks["analysis_mask"], pix_type=meta.pix_type) + lmax = mu.lmax_from_map(mask, pix_type=meta.pix_type) + lmax_sim = lmax + 500 lth = np.arange(lmax_sim + 1) tf_settings = meta.transfer_settings @@ -33,88 +36,105 @@ def main(args): Nsims = tf_settings["tf_est_num_sims"] - hp_ordering = ["TT", "TE", "TB", "EE", "EB", "BB"] - if tf_settings["beams_list"] is not None: beams = {} for beam_label in tf_settings["beams_list"]: - _, bl = meta.read_beam(beam_label) + _, bl = meta.read_beam(beam_label, lmax=lmax_sim) beams[beam_label] = bl else: beams = {None: None} + template = mu.template_from_map(mask, ncomp=3, pix_type=meta.pix_type) + # from pspy import so_map + # template = so_map.full_sky_car_template(ncomp=3, res=10).data mpi.init(True) for id_sim in mpi.taskrange(Nsims - 1): - almsTEB = hp.synalm( - [cl_power_law_tf_est[k] for k in hp_ordering], - lmax=lmax_sim +
almsTEB = sim_utils.get_alms_from_cls( + ps_dict=cl_power_law_tf_est, + lmax=lmax_sim, + fields="TEB", + components=None ) + print(np.sum(almsTEB[0]-almsTEB[1])) + print(almsTEB[0]) + print(almsTEB[1]) + print(almsTEB[2]) for beam_label, bl in beams.items(): if verbose: print(f"# {id_sim} | {beam_label}") - suffix = "" - if tf_settings["do_not_beam_est_sims"]: - bl = None - - elif beam_label is not None: + if not tf_settings["do_not_beam_est_sims"]: suffix = f"_{beam_label}" + almsTEB_post = sim_utils.beam_alms( + almsTEB.copy(), + bl + ) + else: + suffix = "" + almsTEB_post = almsTEB sims = { - f"pure{f}": utils.generate_map_from_alms( - almsTEB * select[:, None], - meta.nside, - bl=bl - ) - for f, select in zip("TEB", np.eye(3)) + f"pure{f}": sim_utils.get_map_from_alms( + almsTEB_post * select[:, None], + template=template + ) for f, select in zip("TEB", np.eye(3)) } for f in "TEB": fname = f"pure{f}_power_law_tf_est_{id_sim:04d}{suffix}.fits" - hp.write_map( + mu.write_map( f"{sim_dir}/{fname}", sims[f"pure{f}"], - overwrite=True, - dtype=np.float32 + pix_type=meta.pix_type ) - if do_plots: - ps_hp_order = ["TT", "EE", "BB", "TE", "EB", "TB"] - for beam_label in beams: - suffix = "" if beam_label is None else f"_{beam_label}" - for f in "TEB": - - cls_dict = {fp: [] for fp in hp_ordering} - - for id_sim in range(Nsims): - fname = f"pure{f}_power_law_tf_est_{id_sim:04d}{suffix}" - alms = hp.map2alm( - hp.read_map( - f"{sim_dir}/{fname}.fits", - field=[0, 1, 2] - ) + from pixell import enplot + for i, mode in zip([1, 2], "QU"): + plot = enplot.plot(sims[f"pure{f}"][i], ticks=10, + color="planck") + enplot.write( + f"{plot_dir}/pure{f}_power_law_tf_est_{id_sim:04d}{suffix}_{mode}", # noqa + plot ) - cls = hp.alm2cl(alms) - - for i, fp in enumerate(ps_hp_order): - cls_dict[fp] += [cls[i]] - - for fp in ps_hp_order: - cls_dict[fp] = np.mean(cls_dict[fp], axis=0) - - plt.figure(figsize=(10, 8)) - plt.xlabel(r"$\ell$", fontsize=15) - plt.ylabel(r"$C_\ell$", fontsize=15) - for fp in ps_hp_order: - plt.plot(cls_dict[fp], label=fp, lw=1.7) - plt.yscale("symlog", linthresh=1e-6) - plt.xlim(0, lmax_sim) - plt.legend() - plt.title(f"Power law pure{f} simulation") - plt.savefig(f"{plot_dir}/power_law_pure{f}{suffix}.png", - bbox_inches="tight") + + # if do_plots: + # ps_hp_order = ["TT", "EE", "BB", "TE", "EB", "TB"] + # for beam_label in beams: + # suffix = "" if beam_label is None else f"_{beam_label}" + # for f in "TEB": + + # cls_dict = {fp: [] for fp in hp_ordering} + + # for id_sim in range(Nsims): + # fname = f"pure{f}_power_law_tf_est_{id_sim:04d}{suffix}" + # alms = hp.map2alm( + # mu.read_map( + # f"{sim_dir}/{fname}.fits", + # field=[0, 1, 2], + # pix_type=meta.pix_type, + # convert_K_to_muK=True), + # ) + # cls = hp.alm2cl(alms) + + # for i, fp in enumerate(ps_hp_order): + # cls_dict[fp] += [cls[i]] + + # for fp in ps_hp_order: + # cls_dict[fp] = np.mean(cls_dict[fp], axis=0) + + # plt.figure(figsize=(10, 8)) + # plt.xlabel(r"$\ell$", fontsize=15) + # plt.ylabel(r"$C_\ell$", fontsize=15) + # for fp in ps_hp_order: + # plt.plot(cls_dict[fp], label=fp, lw=1.7) + # plt.yscale("symlog", linthresh=1e-6) + # plt.xlim(0, lmax_sim) + # plt.legend() + # plt.title(f"Power law pure{f} simulation") + # plt.savefig(f"{plot_dir}/power_law_pure{f}{suffix}.png", + # bbox_inches="tight") if __name__ == "__main__": diff --git a/pipeline/transfer/compute_pseudo_cells_tf_estimation.py b/pipeline/transfer/compute_pseudo_cells_tf_estimation.py index 83081cf..0fe8ac9 100644 --- 
a/pipeline/transfer/compute_pseudo_cells_tf_estimation.py +++ b/pipeline/transfer/compute_pseudo_cells_tf_estimation.py @@ -1,10 +1,10 @@ import argparse from soopercool import BBmeta -import healpy as hp import pymaster as nmt import numpy as np from soopercool import ps_utils from soopercool import mpi_utils as mpi +from soopercool import map_utils as mu def main(args): @@ -22,7 +22,7 @@ def main(args): binning["bin_high"] + 1) mask_file = meta.masks["analysis_mask"] - mask = hp.read_map(mask_file) + mask = mu.read_map(mask_file, pix_type=meta.pix_type) filtering_tags = meta.get_filtering_tags() filtering_tag_pairs = meta.get_independent_filtering_pairs() @@ -65,21 +65,26 @@ def main(args): ) filtered_map_file = f"{filtered_map_dir}/{filtered_map_file}" - map = hp.read_map(unfiltered_map_file, - field=[0, 1, 2]) - map_filtered = hp.read_map(filtered_map_file, - field=[0, 1, 2]) + map = mu.read_map( + unfiltered_map_file, pix_type=meta.pix_type, + fields_hp=[0, 1, 2] + ) + map_filtered = mu.read_map( + filtered_map_file, pix_type=meta.pix_type, + fields_hp=[0, 1, 2] + ) + wcs = map.wcs if meta.pix_type == "car" else None field = { - "spin0": nmt.NmtField(mask, map[:1]), + "spin0": nmt.NmtField(mask, map[:1], wcs=wcs), "spin2": nmt.NmtField(mask, map[1:], - purify_b=meta.pure_B) + purify_b=meta.pure_B, wcs=wcs) } field_filtered = { - "spin0": nmt.NmtField(mask, map_filtered[:1]), + "spin0": nmt.NmtField(mask, map_filtered[:1], wcs=wcs), "spin2": nmt.NmtField(mask, map_filtered[1:], - purify_b=meta.pure_B) + purify_b=meta.pure_B, wcs=wcs) } fields[ftag]["unfiltered"][pure_type] = field diff --git a/pipeline/tutorial_planck/get_bandpower_window_function.sh b/pipeline/tutorial_planck/get_bandpower_window_function.sh new file mode 100644 index 0000000..11f19db --- /dev/null +++ b/pipeline/tutorial_planck/get_bandpower_window_function.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +paramfile='../paramfiles/paramfile_planck.yaml' + +echo "Running pipeline with paramfile: ${paramfile}" + +#OpenMP settings: +export OMP_NUM_THREADS=1 +export OMP_PLACES=threads +export OMP_PROC_BIND=spread + +# Preprocessing (need not be run if already done) +python misc/get_binning.py --globals ${paramfile} --deltal 10 +python get_analysis_mask.py --globals ${paramfile} + +# Compute mask-based mode coupling matrix +python get_mode_coupling.py --globals ${paramfile} + +# Generate transfer function simulations +srun -n 10 -c 24 python simulations/generate_tf_estimation_sims.py --globals ${paramfile} --verbose --no-plots + +# Compute transfer function +srun -n 10 -c 24 python transfer/compute_pseudo_cells_tf_estimation.py --globals ${paramfile} +python transfer/compute_transfer_function.py --globals ${paramfile} + +# Compute bandpower window function +python get_full_couplings.py --globals ${paramfile} \ No newline at end of file diff --git a/pipeline/tutorial_planck/get_covariance_matrix.sh b/pipeline/tutorial_planck/get_covariance_matrix.sh new file mode 100644 index 0000000..12331d9 --- /dev/null +++ b/pipeline/tutorial_planck/get_covariance_matrix.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +paramfile='../paramfiles/paramfile_planck.yaml' + +echo "Running pipeline with paramfile: ${paramfile}" + +#OpenMP settings: +export OMP_NUM_THREADS=1 +export OMP_PLACES=threads +export OMP_PROC_BIND=spread + +# Co-add signal and noise bundle simulations +#srun -n 10 -c 24 +python simulations/coadd_simulated_maps.py --globals ${paramfile} --verbose + +# Compute cross-bundle spectra from sims +srun -n 10 -c 24 python compute_sims_pseudo_cells.py --globals ${paramfile}
--verbose + +# Coadd simulation cross-bundle spectra +python coadd_sims_pseudo_cells.py --globals ${paramfile} --verbose + +# Compute covariance matrix +python compute_covariance_from_sims.py --globals ${paramfile} \ No newline at end of file diff --git a/pipeline/tutorial_planck/get_sacc_file.sh b/pipeline/tutorial_planck/get_sacc_file.sh new file mode 100644 index 0000000..4040450 --- /dev/null +++ b/pipeline/tutorial_planck/get_sacc_file.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +paramfile='../paramfiles/paramfile_planck.yaml' + +echo "Running pipeline with paramfile: ${paramfile}" + +#OpenMP settings: +export OMP_NUM_THREADS=1 +export OMP_PLACES=threads +export OMP_PROC_BIND=spread + +# Package data into SACC file +python create_sacc_file.py --globals ${paramfile} --data + +# Package sims into SACC files (optional) +srun -n 10 -c 24 python create_sacc_file.py --globals ${paramfile} --sims + +# Plot SACC contents (includes sims if present) +python sacc_plotter.py --globals ${paramfile} --verbose \ No newline at end of file diff --git a/pipeline/tutorial_planck/maps_to_power_spectra.sh b/pipeline/tutorial_planck/maps_to_power_spectra.sh new file mode 100644 index 0000000..b05576a --- /dev/null +++ b/pipeline/tutorial_planck/maps_to_power_spectra.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +paramfile='../paramfiles/paramfile_planck.yaml' + +echo "Running pipeline with paramfile: ${paramfile}" + +# Compute multipole binning (with constant bin width 10) and analysis mask +python misc/get_binning.py --globals ${paramfile} --deltal 10 +python get_analysis_mask.py --globals ${paramfile} + +# Load Planck maps and beams and save them to disk +python pre_processer_ext.py --data --planck --globals ${paramfile} + +# Compute Planck per-bundle power spectra +python compute_pseudo_cells.py --globals ${paramfile} + +# Coadd Planck per-bundle power spectra +python coadd_pseudo_cells.py --globals ${paramfile} diff --git a/soopercool/map_utils.py b/soopercool/map_utils.py index 5d65756..aef64ce 100644 --- a/soopercool/map_utils.py +++ b/soopercool/map_utils.py @@ -1,52 +1,410 @@ +import numpy as np import healpy as hp +from pixell import enmap, enplot import matplotlib.pyplot as plt +from pixell import uharm +import pymaster as nmt -def read_map(map_file, ncomp): +def _check_pix_type(pix_type): """ + Error handling for pixellization types. + + Parameters + ---------- + pix_type : str + Pixellization type. + """ + if not (pix_type in ['hp', 'car']): + raise ValueError(f"Unknown pixelisation type {pix_type}.") + + +def ud_grade(map_in, nside_out, power=None, pix_type='hp'): + """ + Utility function to upgrade or downgrade a map. + Only support the healpix pixellization type. + + Parameters + ---------- + map_in : np.ndarray + Input map. + nside_out : int + Output nside. + power : float, optional + Set to -2 to keep the sum invariant (for hits) + pix_type : str, optional + Pixellization type. + + Returns + ------- + map_out : np.ndarray + Output map. + """ + if pix_type != 'hp': + raise ValueError("Can't U/D-grade non-HEALPix maps") + return hp.ud_grade(map_in, nside_out=nside_out, power=power) + + +def lmax_from_map(map, pix_type="hp"): + """ + Determine the maximum multipole from a map and its + pixellization type. + + Parameters + ---------- + map : np.ndarray or enmap.ndmap + Input map. + pix_type : str, optional + Pixellization type. + + Returns + ------- + int + Maximum multipole. 
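+ + Example + ------- + A minimal illustrative sketch (assumes a full-sky HEALPix map at nside=256, for which lmax is 3 * 256 - 1): + + >>> m = np.zeros(12 * 256**2) + >>> lmax_from_map(m, pix_type="hp") + 767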
+ """ + _check_pix_type(pix_type) + if pix_type == "hp": + nside = hp.npix2nside(map.shape[-1]) + return 3 * nside - 1 + else: + _, wcs = map.geometry + res = np.deg2rad(np.min(np.abs(wcs.wcs.cdelt))) + lmax = uharm.res2lmax(res) + return lmax + + +def _get_pix_type(map_file): + """ + Determine the pixellization type from a map file. + Since `read_fits_header` does not handle compression, + assign the HEALPIX type to compressed files. + + Parameters + ---------- + map_file : str + Map file name. + + Returns + ------- + str + Pixellization type. """ - return hp.read_map(map_file, field=[i for i in range(ncomp)]) + if "fits.gz" in map_file: + return "hp" + header = enmap.read_fits_header(map_file) + if "WCSAXES" in header: + return "car" + else: + return "hp" -def write_map(map_file, map, dtype=None): + +def read_map(map_file, + pix_type='hp', + fields_hp=None, + convert_K_to_muK=False, + geometry=None): """ + Read a map from a file, regardless of the pixellization type. + + Parameters + ---------- + map_file : str + Map file name. + pix_type : str, optional + Pixellization type. + fields_hp : tuple, optional + Fields to read from a HEALPix map. + convert_K_to_muK : bool, optional + Convert K to muK. + geometry : enmap.geometry, optional + Enmap geometry. + + Returns + ------- + map_out : np.ndarray + Loaded map. """ - hp.write_map(map_file, map, overwrite=True, dtype=dtype) + conv = 1 + if convert_K_to_muK: + conv = 1.e6 + _check_pix_type(pix_type) + if pix_type == 'hp': + kwargs = {"field": fields_hp} if fields_hp is not None else {} + m = hp.read_map(map_file, **kwargs) + else: + m = enmap.read_map(map_file, geometry=geometry) + + return conv*m + +def write_map(map_file, map, dtype=None, pix_type='hp', + convert_muK_to_K=False): + """ + Write a map to a file, regardless of the pixellization type. -def plot_map(map, title=None, file_name=None, lims=None): + Parameters + ---------- + map_file : str + Map file name. + map : np.ndarray + Map to write. + dtype : np.dtype, optional + Data type. + pix_type : str, optional + Pixellization type. + convert_muK_to_K : bool, optional + Convert muK to K. """ + if convert_muK_to_K: + map *= 1.e-6 + _check_pix_type(pix_type) + if pix_type == 'hp': + hp.write_map(map_file, map, overwrite=True, dtype=dtype) + else: + enmap.write_map(map_file, map) + + +def smooth_map(map, fwhm_deg, pix_type="hp"): + """ + Apply a Gaussian smoothing to a map with + a given FWHM in degrees. + + Parameters + ---------- + map : np.ndarray + Input map. + fwhm_deg : float + FWHM in degrees. + pix_type : str, optional + Pixellization type. + + Returns + ------- + map_out : np.ndarray + Smoothed map. + """ + _check_pix_type(pix_type) + if pix_type == "hp": + return hp.smoothing(map, fwhm=np.deg2rad(fwhm_deg)) + else: + sigma_deg = fwhm_deg / np.sqrt(8 * np.log(2)) + return enmap.smooth_gauss(map, np.deg2rad(sigma_deg)) + + +def _plot_map_hp(map, lims=None, file_name=None, title=None): + """ + Hidden function to plot HEALPIX maps and either show it + or save it to a file. + + + Parameters + ---------- + map : np.ndarray + Input map. + lims : list, optional + Color scale limits. + If map is a single component, lims is a list [min, max]. + If map is a 3-component map, lims is a list of 2-element lists. + file_name : str, optional + Output file name. + title : str, optional + Plot title. 
""" ncomp = map.shape[0] if len(map.shape) == 2 else 1 cmap = "YlOrRd" if ncomp == 1 else "RdYlBu_r" - kwargs = {"title": title, "cmap": cmap} - if lims is None: range_args = [{} for i in range(ncomp)] - if ncomp == 1: - if lims is not None: - range_args = [{ - "min": lims[0], - "max": lims[1] - }] - to_plot = [map] - - elif ncomp == 3: - if lims is not None: - range_args = [ - { - "min": lims[i][0], - "max": lims[i][1] - } for i in lims(3) - ] + if ncomp == 1 and lims is not None: + range_args = [{ + "min": lims[0], + "max": lims[1] + }] + if ncomp == 3 and lims is not None: + range_args = [ + { + "min": lims[i][0], + "max": lims[i][1] + } for i in lims(3) + ] for i in range(ncomp): - hp.mollview(to_plot[i], **kwargs, **range_args[i]) - + if ncomp != 1: + f = "TQU"[i] + print("np.shape(map)", np.shape(np.atleast_2d(map))) + hp.mollview( + np.atleast_2d(map)[i], + cmap=cmap, + title=title, + **range_args[i], + cbar=True + ) if file_name: if ncomp == 1: plt.savefig(f"{file_name}.png", bbox_inches="tight") else: - plt.savefig(f"{file_name}_{'TQU'[i]}.png", bbox_inches="tight") + plt.savefig(f"{file_name}_{f}.png", bbox_inches="tight") else: plt.show() + + +def _plot_map_car(map, lims=None, file_name=None): + """ + Hidden function to plot CAR maps and either show it + or save it to a file. + + Parameters + ---------- + map : np.ndarray + Input map. + lims : list, optional + Color scale limits. + If map is a single component, lims is a list [min, max]. + If map is a 3-component map, lims is a list of 2-element lists. + file_name : str, optional + Output file name. + """ + ncomp = map.shape[0] if len(map.shape) == 3 else 1 + + if lims is None: + range_args = {} + + if ncomp == 1 and lims is not None: + range_args = { + "min": lims[0], + "max": lims[1] + } + if ncomp == 3 and lims is not None: + range_args = { + "min": [lims[i][0] for i in range(ncomp)], + "max": [lims[i][1] for i in range(ncomp)] + } + + plot = enplot.plot( + map, + colorbar=True, + ticks=10, + **range_args + ) + for i in range(ncomp): + suffix = "" + if ncomp != 1: + suffix = f"_{'TQU'[i]}" + + if file_name: + enplot.write( + f"{file_name}{suffix}.png", + plot[i] + ) + else: + enplot.show(plot[i]) + + +def plot_map(map, file_name=None, lims=None, title=None, pix_type="hp"): + """ + Plot a map regardless of the pixellization type. + + Parameters + ---------- + map : np.ndarray + Input map. + file_name : str, optional + Output file name. + lims : list, optional + Color scale limits. + If map is a single component, lims is a list [min, max]. + If map is a 3-component map, lims is a list of 2-element lists. + title : str, optional + Plot title. + pix_type : str, optional + Pixellization type. + """ + _check_pix_type(pix_type) + + if pix_type == "hp": + _plot_map_hp(map, lims, file_name=file_name, title=title) + else: + _plot_map_car(map, lims, file_name=file_name) + + +def apodize_mask(mask, apod_radius_deg, apod_type, pix_type="hp"): + """ + Apodize a mask with a given apod radius and type regardless + of the pixellization type. + CAR apodization code inspired from pspy. + + Parameters + ---------- + mask : np.ndarray or enmap.ndmap + Input mask. + apod_radius_deg : float + Apodization radius in degrees. + apod_type : str + Apodization type + pix_type : str, optional + Pixellization type. + + Returns + ------- + mask_apo : np.ndarray or enmap.ndmap + Apodized mask. 
+ """ + _check_pix_type(pix_type) + if pix_type == "hp": + mask_apo = nmt.mask_apodization( + mask, + apod_radius_deg, + apod_type + ) + else: + distance = enmap.distance_transform(mask) + distance = np.rad2deg(distance) + + mask_apo = mask.copy() + idx = np.where(distance > apod_radius_deg) + + if apod_type == "C1": + mask_apo = 0.5 - 0.5 * np.cos(-np.pi * distance / apod_radius_deg) + elif apod_type == "C2": + mask_apo = ( + distance / apod_radius_deg - + np.sin(2 * np.pi * distance / apod_radius_deg) / (2 * np.pi) + ) + else: + raise ValueError(f"Unknown apodization type {apod_type}") + mask_apo[idx] = 1 + + return mask_apo + + +def template_from_map(map, ncomp, pix_type="hp"): + """ + Generate a template from a map regardless of the pixellization type. + + Parameters + ---------- + map : np.ndarray or enmap.ndmap + Input map. + ncomp : int + Number of components of the output template. + pix_type : str, optional + Pixellization type. + + Returns + ------- + template : np.ndarray or enmap.ndmap + Template. + """ + _check_pix_type(pix_type) + if pix_type == "hp": + if map.shape > 1: + new_shape = (ncomp,) + map.shape[1:] + else: + new_shape = (ncomp, len(map)) + return np.zeros(new_shape) + + else: + shape, wcs = map.geometry + new_shape = (ncomp,) + shape[-2:] + + return enmap.zeros(new_shape, wcs) diff --git a/soopercool/metadata_manager.py b/soopercool/metadata_manager.py index 87c8946..0db9df8 100644 --- a/soopercool/metadata_manager.py +++ b/soopercool/metadata_manager.py @@ -1,7 +1,8 @@ +import soopercool.map_utils as mu +import soopercool.utils as su import yaml import numpy as np import os -import healpy as hp import time @@ -169,12 +170,14 @@ def read_mask(self, mask_type): Type of mask to load. Can be "binary", "galactic", "point_source" or "analysis". """ - return hp.ud_grade( - hp.read_map(getattr(self, f"{mask_type}_mask_name")), - nside_out=self.nside - ) + mask = mu.read_map(getattr(self, f"{mask_type}_mask_name"), + pix_type=self.pix_type) + if self.pix_type == 'hp': + mask = mu.ud_grade(mask, nside_out=self.nside, + pix_type=self.pix_type) + return mask - def save_mask(self, mask_type, mask, overwrite=False): + def save_mask(self, mask_type, mask): """ Save the mask given a mask type. @@ -185,21 +188,22 @@ def save_mask(self, mask_type, mask, overwrite=False): Can be "binary", "galactic", "point_source" or "analysis". mask : array-like Mask to save. - overwrite : bool, optional - Overwrite the mask if it already exists. """ - return hp.write_map(getattr(self, f"{mask_type}_mask_name"), mask, - overwrite=overwrite, dtype=np.float32) + return mu.write_map(getattr(self, f"{mask_type}_mask_name"), mask, + dtype=np.float32, pix_type=self.pix_type) def read_hitmap(self): """ Read the hitmap. For now, we assume that all tags share the same hitmap. """ - hitmap = hp.read_map(self.nhits_map_name) - return hp.ud_grade(hitmap, self.nside, power=-2) + hitmap = mu.read_map(self.nhits_map_name, pix_type=self.pix_type) + if self.pix_type == 'hp': + hitmap = mu.ud_grade(hitmap, self.nside, power=-2, + pix_type=self.pix_type) + return hitmap - def save_hitmap(self, map, overwrite=True): + def save_hitmap(self, map): """ Save the hitmap to disk. @@ -208,10 +212,9 @@ def save_hitmap(self, map, overwrite=True): map : array-like Mask to save. 
""" - hp.write_map( + mu.write_map( os.path.join(self.mask_directory, self.masks["nhits_map"]), - map, dtype=np.float32, overwrite=overwrite - ) + map, dtype=np.float32, pix_type=self.pix_type) def read_nmt_binning(self): """ @@ -236,13 +239,12 @@ def get_effective_ells(self): binner = self.read_nmt_binning() return binner.get_effective_ells() - def read_beam(self, map_set): + def read_beam(self, map_set, lmax=None): """ """ beam_dir = self.beam_dir_from_map_set(map_set) beam_file = self.beam_file_from_map_set(map_set) - beam_file = f"{beam_dir}/{beam_file}" - l, bl = np.loadtxt(beam_file, unpack=True) + l, bl = su.read_beam_from_file(f"{beam_dir}/{beam_file}", lmax=lmax) if self.beam_floor is not None: bl[bl < self.beam_floor] = self.beam_floor return l, bl @@ -418,49 +420,6 @@ def print_banner(self, msg): print("==============================================================") print('') - def get_nhits_map_from_toast_schedule(self, filter_tag): - from soopercool.utils import toast_filter_map - import subprocess - - tag_settings = self.tags_settings[filter_tag] - - if tag_settings["filtering_type"] != "toast": - raise NotImplementedError(f"Filterer type {tag_settings['filtering_type']} " # noqa - "not implemented") - kwargs = { - "map": None, - "map_file": self.masks["input_nhits_path"], - "mask": None, - "template": tag_settings["template"], - "config": tag_settings["config"], - "schedule": tag_settings["schedule"], - "nside": self.nside, - "instrument": tag_settings["tf_instrument"], - "band": tag_settings["tf_band"], - "sbatch_job_name": "get_nhits_map", - "sbatch_dir": self.scripts_dir, - "nhits_map_only": True - } - sbatch_file = toast_filter_map(**kwargs) - - if self.slurm: - # Running with SLURM job scheduller - cmd = "sbatch {}".format(str(sbatch_file.resolve())) - if self.slurm_autosubmit: - subprocess.run(cmd, shell=True, check=True) - raise Exception( - 'Submitted SLURM script for nhits map calculation. \ - Please run the script again after SLURM job finished.') - else: - self.print_banner( - msg='To submit these scripts to SLURM:\n {}'.format(cmd) - ) - raise Exception( - 'Pleas __rerun__ the script after SLURM job finished.') - else: - # Run the script directly - subprocess.run(str(sbatch_file.resolve()), shell=True, check=True) - def get_map_filename_transfer(self, id_sim, cl_type, pure_type=None, filter_tag=None): """ @@ -473,7 +432,8 @@ def get_map_filename_transfer(self, id_sim, cl_type, return f"{path_to_maps}/{file_name}" - def read_map(self, map_set, id_split, id_sim=None, pol_only=False): + def read_map(self, map_set, id_split, id_sim=None, pol_only=False, + convert_K_to_muK=False): """ Read a map given a map set and split index. Can also read a given covariance simulation if `id_sim` is provided. @@ -489,13 +449,16 @@ def read_map(self, map_set, id_split, id_sim=None, pol_only=False): If None, return the data map. pol_only : bool, optional Return only the polarization maps. + convert_K_to_muK: bool, optional + Convert maps from K to muK units upon loading. """ field = [1, 2] if pol_only else [0, 1, 2] fname = self.get_map_filename(map_set, id_split, id_sim) - return hp.read_map(fname, field=field) + return mu.read_map(fname, field=field, pix_type=self.pix_type, + convert_K_to_muK=convert_K_to_muK) def read_map_transfer(self, id_sim, signal=None, e_or_b=None, - pol_only=False): + pol_only=False, convert_K_to_muK=False): """ Read a map given a simulation index. Can also read a pure-E or pure-B simulation if `e_or_b` is provided. 
@@ -514,13 +477,16 @@
             If None, assumes validation mode.
         pol_only : bool, optional
             Return only the polarization maps.
+        convert_K_to_muK : bool, optional
+            Convert maps from K to muK units upon loading.
         """
         field = [1, 2] if pol_only else [0, 1, 2]
         if (not signal and not e_or_b) or (signal and e_or_b):
             raise ValueError("You have to set to None either `signal` or "
                              "`e_or_b`")
         fname = self.get_map_filename_transfer(id_sim, signal, e_or_b)
-        return hp.read_map_transfer(fname, field=field)
+        return mu.read_map(fname, fields_hp=field, pix_type=self.pix_type,
+                           convert_K_to_muK=convert_K_to_muK)

     def get_ps_names_list(self, type="all", coadd=False):
         """
diff --git a/soopercool/ps_utils.py b/soopercool/ps_utils.py
index 0d39776..0fe44cf 100644
--- a/soopercool/ps_utils.py
+++ b/soopercool/ps_utils.py
@@ -1,7 +1,7 @@
+import soopercool.map_utils as mu
 from itertools import product
 import pymaster as nmt
 import numpy as np
-import healpy as hp


 def get_validation_power_spectra(meta, id_sim, mask, nmt_binning,
@@ -28,7 +28,7 @@
     map_files = [mf.replace(".fits", "_filtered.fits")
                  for mf in map_files]

-    maps = [hp.read_map(m, field=[0, 1, 2])
+    maps = [mu.read_map(m, fields_hp=[0, 1, 2], convert_K_to_muK=True)
             for m in map_files]

     field = [{
diff --git a/soopercool/sim_utils.py b/soopercool/sim_utils.py
new file mode 100644
index 0000000..69e40ae
--- /dev/null
+++ b/soopercool/sim_utils.py
@@ -0,0 +1,165 @@
+from pixell import curvedsky
+import healpy as hp
+import numpy as np
+
+
+def get_alm_ordering(fields="TEB", components=None):
+    """
+    Build an iterator that yields the ordering of the alm components
+    given fields and components (e.g. frequencies).
+    Expected behavior for T, E, B fields and components f1, f2 is:
+    almT_f1, almE_f1, almB_f1, almT_f2, almE_f2, almB_f2
+
+    Parameters
+    ----------
+    fields : str
+        The fields to consider. Default is "TEB".
+    components : list
+        The components to consider. Default is None.
+    """
+    if components is not None:
+        for c in components:
+            for f in fields:
+                yield c, f
+    else:
+        for f in fields:
+            yield f
+
+
+def get_ps_matrix_for_sim(ps_dict, lmax, components=None, fields="TEB"):
+    """
+    Build a matrix of power spectra for simulation of correlated alms.
+
+    Parameters
+    ----------
+    ps_dict : dict
+        A dictionary containing the power spectra.
+    lmax : int
+        The maximum multipole to consider.
+    components : list
+        The components to consider. Default is None.
+    fields : str
+        The fields to consider. Default is "TEB".
+
+    Returns
+    -------
+    ps_matrix : np.ndarray
+        A 3D array containing the power spectra with dimension
+        (nfields * ncomponents, nfields * ncomponents, lmax).
+    """
+    ordering = list(get_alm_ordering(fields=fields, components=components))
+
+    nalms = len(components) * len(fields) \
+        if components is not None else len(fields)
+    ps_matrix = np.zeros((nalms, nalms, lmax))
+
+    for i, lab1 in enumerate(ordering):
+        for j, lab2 in enumerate(ordering):
+            if components is not None:
+                c1, f1 = lab1
+                c2, f2 = lab2
+                ps_matrix[i, j, :] = ps_dict[c1, c2][f1+f2][:lmax]
+            else:
+                f1 = lab1
+                f2 = lab2
+                ps_matrix[i, j, :] = ps_dict[f1+f2][:lmax]
+    return ps_matrix
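+
+
+# Example (sketch): the alm ordering and the matching ps_matrix shape.
+# Component names are illustrative.
+#
+#   from soopercool.sim_utils import get_alm_ordering
+#
+#   list(get_alm_ordering(fields="TEB", components=["f090", "f150"]))
+#   # -> [('f090', 'T'), ('f090', 'E'), ('f090', 'B'),
+#   #     ('f150', 'T'), ('f150', 'E'), ('f150', 'B')]
+#   # get_ps_matrix_for_sim then returns an array of shape (6, 6, lmax),
+#   # indexed in this same order on both axes.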
+
+
+def get_alms_from_cls(ps_dict, lmax, fields="TEB", components=None, seed=None):
+    """
+    Generate alms from power spectra.
+
+    Parameters
+    ----------
+    ps_dict : dict
+        A dictionary containing the power spectra.
+    lmax : int
+        The maximum multipole to consider.
+    fields : str
+        The fields to consider. Default is "TEB".
+    components : list
+        The components to consider. Default is None.
+    seed : int
+        The seed for the random number generator. Default is None.
+    """
+    ps_matrix = get_ps_matrix_for_sim(ps_dict,
+                                      lmax,
+                                      components=components,
+                                      fields=fields)
+    alms = curvedsky.rand_alm(
+        ps_matrix,
+        lmax=lmax,
+        seed=seed
+    )
+    ordering = list(get_alm_ordering(fields, components))
+    alms_dict = {}
+    if components is not None:
+        for comp in components:
+            alms_dict[comp] = [
+                alms[ordering.index((comp, f))]
+                for f in fields
+            ]
+        return alms_dict
+
+    else:
+        return alms
+
+
+def beam_alms(alms, beam, fields="TEB"):
+    """
+    Apply a beam to the alms.
+
+    Parameters
+    ----------
+    alms : np.ndarray
+        Array of alms, ordered as in `fields`.
+    beam : np.ndarray
+        The beam to apply.
+    fields : str
+        The fields to consider. Default is "TEB".
+
+    Returns
+    -------
+    beamed_alms : np.ndarray
+        The beam-convolved alms.
+    """
+    for i in range(len(fields)):
+        alms[i] = curvedsky.almxfl(alms[i], beam)
+    return alms
+
+
+def get_map_from_alms(alms, template):
+    """
+    Get a map from alms and a template.
+
+    Parameters
+    ----------
+    alms : np.ndarray
+        The alms to use.
+    template : np.ndarray
+        The template to use.
+
+    Returns
+    -------
+    map : np.ndarray
+        Returns the corresponding map.
+    """
+    if hasattr(template, "geometry"):
+        pix_type = "car"
+    else:
+        pix_type = "hp"
+
+    if pix_type == "hp":
+        map = hp.alm2map(
+            alms,
+            nside=hp.npix2nside(template.shape[-1])
+        )
+    else:
+        map = curvedsky.alm2map(
+            alms,
+            template.copy(),
+        )
+    return map
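+
+
+# Example (sketch): draw correlated TEB alms from a power-spectrum
+# dictionary, convolve them with a Gaussian beam, and project onto a
+# HEALPix template. `ps_dict` keys follow the "TT", "EE", ... convention
+# assumed above; the spectra and beam values are illustrative.
+#
+#   import numpy as np
+#   import healpy as hp
+#   from soopercool import sim_utils
+#
+#   lmax, nside = 383, 128
+#   ell = np.arange(lmax)
+#   ps_dict = {f1 + f2: np.zeros(lmax) for f1 in "TEB" for f2 in "TEB"}
+#   for spec in ["TT", "EE", "BB"]:
+#       ps_dict[spec] = 1. / (ell + 10.) ** 2
+#   alms = sim_utils.get_alms_from_cls(ps_dict, lmax, seed=1234)
+#   beam = hp.gauss_beam(np.deg2rad(0.5), lmax=lmax)
+#   alms = sim_utils.beam_alms(alms, beam)
+#   template = np.zeros((3, hp.nside2npix(nside)))
+#   map_sim = sim_utils.get_map_from_alms(alms, template)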
diff --git a/soopercool/utils.py b/soopercool/utils.py
index 1bc38b0..0a24d7d 100644
--- a/soopercool/utils.py
+++ b/soopercool/utils.py
@@ -1,3 +1,4 @@
+import soopercool.map_utils as mu
 import numpy as np
 import os
 import healpy as hp
@@ -186,18 +187,20 @@ def beam_hpix(ll, nside):
     return beam_gaussian(ll, fwhm_hp_amin)


-def create_binning(nside, delta_ell, end_first_bin=None):
+def create_binning(lmax, delta_ell, end_first_bin=None):
     """
+    Build a linear multipole binning up to lmax.
+
+    Parameters
+    ----------
+    lmax : int
+        Maximum multipole.
+    delta_ell : int
+        Bin width.
+    end_first_bin : int, optional
+        If given, the first bin spans [0, end_first_bin - 1].
     """
     if end_first_bin is not None:
-        bin_low = np.arange(end_first_bin, 3*nside, delta_ell)
+        bin_low = np.arange(end_first_bin,
+                            lmax + 1,
+                            delta_ell)
         bin_high = bin_low + delta_ell - 1
         bin_low = np.concatenate(([0], bin_low))
         bin_high = np.concatenate(([end_first_bin-1], bin_high))
     else:
-        bin_low = np.arange(0, 3*nside, delta_ell)
+        bin_low = np.arange(0, lmax + 1, delta_ell)
         bin_high = bin_low + delta_ell - 1
-        bin_high[-1] = 3*nside - 1
+    # Clamp the last bin edge to lmax in both branches
+    bin_high[-1] = lmax
     bin_center = (bin_low + bin_high) / 2

     return bin_low, bin_high, bin_center
@@ -214,6 +217,8 @@ def power_law_cl(ell, amp, delta_ell, power_law_index):
         A = amp
         # A is power spectrum amplitude at pivot ell == 1 - delta_ell
         pl_ps[spec] = A / (ell + delta_ell) ** power_law_index
+        if spec != spec[::-1]:
+            pl_ps[spec[::-1]] = pl_ps[spec]

     return pl_ps
@@ -234,8 +239,8 @@
         Maximum nonzero m-degree of the multipole expansion.
         All higher degrees are set to zero.
     """
-    map = hp.read_map(map_file, field=(0, 1, 2))
-    mask = hp.read_map(mask_file)
+    map = mu.read_map(map_file, fields_hp=(0, 1, 2))
+    mask = mu.read_map(mask_file)
     mask[mask != 0] = 1.

     map_masked = map * mask
@@ -252,9 +257,8 @@
     fname = os.path.basename(map_file)
     fname_out = fname.replace(".fits", "_filtered.fits")

-    hp.write_map(f"{out_dir}/{fname_out}",
-                 filtered_map, overwrite=True,
-                 dtype=np.float32)
+    mu.write_map(f"{out_dir}/{fname_out}",
+                 filtered_map, dtype=np.float32)


 def m_filter_map_old(map, map_file, mask, m_cut):
@@ -289,9 +293,8 @@
     filtered_map = hp.alm2map(alms, nside=nside, lmax=lmax)

-    hp.write_map(map_file.replace('.fits', '_filtered.fits'),
-                 filtered_map, overwrite=True,
-                 dtype=np.float32)
+    mu.write_map(map_file.replace('.fits', '_filtered.fits'),
+                 filtered_map, dtype=np.float32)


 def toast_filter_map(map, map_file, mask,
@@ -702,3 +705,12 @@
     cmap.set_under("w")

     return first, second
+
+
+def read_beam_from_file(beam_file, lmax=None):
+    """
+    Read an ell-by-ell beam window function from a text file,
+    optionally truncated at lmax.
+
+    Parameters
+    ----------
+    beam_file : str
+        Path to a two-column (ell, bl) text file.
+    lmax : int, optional
+        If given, return the beam up to (and including) lmax.
+    """
+    l, bl = np.loadtxt(beam_file, unpack=True)
+    if lmax is not None:
+        return l[:lmax+1], bl[:lmax+1]
+    return l, bl
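+
+
+# Example (sketch): truncate a tabulated beam at the analysis lmax. The
+# file name is hypothetical; the file is expected to hold two columns
+# (ell, b_ell) starting at ell = 0 with at least lmax + 1 rows.
+#
+#   from soopercool.utils import read_beam_from_file
+#
+#   l, bl = read_beam_from_file("beams/beam_SAT1_f093.txt", lmax=600)
+#   assert len(bl) == 601  # ell = 0, ..., 600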